Description (string, lengths 18 to 161k characters)
Code (string, lengths 15 to 300k characters)
coding=utf-8. Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. We have a SentencePiece fixture for testing. Test _convert_token_to_id and _convert_id_to_token. fmt: skip
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMProphetNetTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "[PAD]" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "[PAD]") self.assertEqual(vocab_keys[1], "[CLS]") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_012) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_012) def test_full_tokenizer(self): tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ], ) @cached_property def big_tokenizer(self): return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" 
original_tokenizer_encodings = [35389, 6672, 49, 2] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
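A minimal standalone sketch of the token/id round-trip the test above exercises, assuming the test-suite SentencePiece fixture is available at fixtures/test_sentencepiece.model (the same file the test loads via get_tests_dir):

# Sketch only: uses the tiny SentencePiece fixture shipped with the transformers test suite.
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer("fixtures/test_sentencepiece.model", keep_accents=True)

tokens = tokenizer.tokenize("This is a test")
ids = tokenizer.convert_tokens_to_ids(tokens)
# Raw SentencePiece ids are shifted by fairseq_offset to leave room for the special tokens,
# which is why the test adds tokenizer.fairseq_offset to the expected values.
print(tokens)                                # ['▁This', '▁is', '▁a', '▁t', 'est']
print(ids)                                   # [285 + offset, 46 + offset, 10 + offset, ...]
print(tokenizer.convert_ids_to_tokens(ids))  # round-trips back to the same pieces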
coding=utf-8. Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The expected output shape is (batch_size, sequence_length, embedding_vector_dim); compare the actual values for a slice of the last dim.
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_flax_xlm_roberta_base(self): model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base") tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base") text = "The dog is cute and lives in the garden house" input_ids = jnp.array([tokenizer.encode(text)]) expected_output_shape = (1, 12, 768) expected_output_values_last_dim = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) output = model(input_ids)["last_hidden_state"] self.assertEqual(output.shape, expected_output_shape) self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
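Outside the test harness, the same Flax integration check can be reproduced with a short script; this sketch simply mirrors the test above and assumes the xlm-roberta-base weights can be downloaded:

# Sketch mirroring the Flax integration test above; requires jax/flax and network access.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxXLMRobertaModel

model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

input_ids = jnp.array([tokenizer.encode("The dog is cute and lives in the garden house")])
last_hidden_state = model(input_ids)["last_hidden_state"]
print(last_hidden_state.shape)      # (1, 12, 768): batch_size, sequence_length, embedding_vector_dim
print(last_hidden_state[:, :, -1])  # the slice the test compares against the expected values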
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The input ids encode "My dog is cute"; compare the actual values for a slice.
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class TFFlaubertModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base") features = { "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32), "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32), } output = model(features)["last_hidden_state"] expected_shape = tf.TensorShape((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [ [ [0.0681762, 0.10894451, 0.06772504], [-0.06423668, 0.02366615, 0.04329344], [-0.06057295, 0.09974135, -0.00070584], ] ], dtype=tf.float32, ) self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
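The TF check follows the same pattern; here is a sketch of running it by hand, assuming the jplu/tf-xlm-roberta-base checkpoint used by the test is reachable:

# Sketch mirroring the TF integration test above; the ids encode "My dog is cute".
import tensorflow as tf
from transformers import TFXLMRobertaModel

model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
features = {
    "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),
    "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
}

last_hidden_state = model(features)["last_hidden_state"]
print(last_hidden_state.shape)       # (1, 6, 768)
print(last_hidden_state[:, :3, :3])  # the 3x3 slice compared against expected_slice in the test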
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The input ids encode "The dog is cute and lives in the garden house"; the expected output shape is (batch_size, sequence_length, embedding_vector_dim). The reference values come from fairseq: xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base'); xlmr.eval(); expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]; compare the actual values for a slice of the last dim. The same steps are repeated with xlmr.large for the large checkpoint.
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class XLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_base(self): model = XLMRobertaModel.from_pretrained("xlm-roberta-base") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) expected_output_shape = torch.Size((1, 12, 768)) expected_output_values_last_dim = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) with torch.no_grad(): output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @slow def test_xlm_roberta_large(self): model = XLMRobertaModel.from_pretrained("xlm-roberta-large") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) expected_output_shape = torch.Size((1, 12, 1024)) expected_output_values_last_dim = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) with torch.no_grad(): output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
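The expected_output_values_last_dim tensors above were originally derived from fairseq's reference implementation, per the commented-out recipe preserved in the description. A sketch of that recipe, assuming the fairseq package is installed and torch.hub can fetch pytorch/fairseq:

# Sketch of regenerating the reference slice with fairseq via torch.hub
# (per the commented-out recipe in the test); requires fairseq and network access.
import torch

xlmr = torch.hub.load("pytorch/fairseq", "xlmr.base")  # use "xlmr.large" for the large test
xlmr.eval()

# "The dog is cute and lives in the garden house"
input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
print(expected_output_values_last_dim)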
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. We have a SentencePiece fixture for testing. Test _convert_token_to_id and _convert_id_to_token. In the expected ids of the full-tokenizer test, unk: 2 + 1 = 3 at both <unk> positions. test_save_pretrained is overwritten from test_tokenization_common to speed up the test; as we don't have a slow version, we can't compare the outputs between slow and fast versions. It checks that both tokenizers save with the same files, plus the tokenizer.json file for the fast one; checks everything loads correctly in the same way; and checks special tokens are set accordingly on Rust and Python (self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) and self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))). The Rust tokenizer is then saved with legacy_format=True (checks it saves the same files, loads correctly, and special tokens match) and with legacy_format=False (checks it saved the tokenizer.json file, loads correctly, and special tokens match). The reference encodings for the easy- and hard-symbols tests come from fairseq: xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') (xlmr.large has the same tokenizer); xlmr.eval(); xlmr.encode(symbols). In the hard-symbols expectation, 4426, 3678 and 2740 are what fairseq tokenizes from "<unk>" ("_" and "unk"), 3 is what we tokenize from "<unk>" ("<unk>"), and 6 is residue from the tokenization: an extra sentencepiece underline. fmt: skip
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMRobertaTokenizer rust_tokenizer_class = XLMRobertaTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "<mask>") self.assertEqual(len(vocab_keys), 1_002) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_002) def test_full_tokenizer(self): tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) def test_save_pretrained(self): if not self.test_slow_tokenizer: return self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) 
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @cached_property def big_tokenizer(self): return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base") def test_picklable_without_disk(self): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SAMPLE_VOCAB, f.name) tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True) pickled_tokenizer = pickle.dumps(tokenizer) pickle.loads(pickled_tokenizer) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [0, 35378, 6661, 38, 2] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) original_tokenizer_encodings = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, 3, 6, 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
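One detail worth calling out from the tests above is test_picklable_without_disk: the tokenizer is built from a NamedTemporaryFile copy of the SentencePiece model and then round-tripped through pickle. A minimal sketch, assuming the test-suite fixture path:

# Sketch of the pickle round-trip without a persistent vocab file on disk.
import pickle
import shutil
import tempfile

from transformers import XLMRobertaTokenizer

with tempfile.NamedTemporaryFile() as f:
    shutil.copyfile("fixtures/test_sentencepiece.model", f.name)  # path is an assumption; any SentencePiece model works
    tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
    restored = pickle.loads(pickle.dumps(tokenizer))
    print(restored.tokenize("This is a test"))  # ['▁This', '▁is', '▁a', '▁t', 'est']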
coding=utf-8. Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. For the decoder-past test: make sure that ids don't start with the pad token; first forward pass; create hypothetical multiple next tokens and extend to next_input_ids (again making sure the ids don't start with the pad token); append to next input_ids and select a random slice; test that outputs are equal for the slice. TODO: fix the failed tests. This regression test was failing with PyTorch < 1.3. Ensure that the default position ids only assign a sequential order; this is a regression test for https://github.com/huggingface/transformers/issues/1761: the position ids should be masked with the embedding object's padding index, therefore the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1. For the integration tests, the input ids encode "The dog is cute and lives in the garden house"; the expected output shape is (batch_size, sequence_length, embedding_vector_dim); compare the actual values for a slice of the last dim.
import unittest from transformers import XLMRobertaXLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, ) from transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl import ( XLMRobertaXLEmbeddings, create_position_ids_from_input_ids, ) class XLMRobertaXLModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return XLMRobertaXLConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = XLMRobertaXLModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = XLMRobertaXLForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = XLMRobertaXLForCausalLM(config=config).to(torch_device).eval() mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] 
output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = XLMRobertaXLForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = XLMRobertaXLForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XLMRobertaXLForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XLMRobertaXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLModel, XLMRobertaXLForSequenceClassification, 
XLMRobertaXLForTokenClassification, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (XLMRobertaXLForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": XLMRobertaXLModel, "fill-mask": XLMRobertaXLForMaskedLM, "question-answering": XLMRobertaXLForQuestionAnswering, "text-classification": XLMRobertaXLForSequenceClassification, "text-generation": XLMRobertaXLForCausalLM, "token-classification": XLMRobertaXLForTokenClassification, "zero-shot": XLMRobertaXLForSequenceClassification, } if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def setUp(self): self.model_tester = XLMRobertaXLModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_create_position_ids_respects_padding_index(self): config = self.model_tester.prepare_config_and_inputs()[0] model = XLMRobertaXLEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): config = self.model_tester.prepare_config_and_inputs()[0] embeddings = XLMRobertaXLEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) @require_torch class XLMRobertaModelXLIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_xl(self): model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) expected_output_shape = torch.Size((1, 12, 2560)) expected_output_values_last_dim = torch.tensor( [[0.0110, 0.0605, 0.0354, 0.0689, 0.0066, 0.0691, 0.0302, 0.0412, 0.0860, 0.0036, 0.0405, 0.0170]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @unittest.skip(reason="Model is too large to be tested on the CI") def test_xlm_roberta_xxl(self): model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xxl").to(torch_device) input_ids = torch.tensor( [[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device ) expected_output_shape = torch.Size((1, 12, 4096)) expected_output_values_last_dim = torch.tensor( [[0.0046, 0.0146, 0.0227, 0.0126, 0.0219, 0.0175, -0.0101, 0.0006, 0.0124, 0.0209, -0.0063, 0.0096]], device=torch_device, ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
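The two position-id tests near the end verify the padding-aware numbering described in the accompanying comments. A small sketch of what create_position_ids_from_input_ids produces, assuming the default pad_token_id of 1 (which is what XLMRobertaXLEmbeddings.padding_idx resolves to with the default config):

# Positions count up from padding_idx + 1, and padded slots keep padding_idx itself.
import torch
from transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl import create_position_ids_from_input_ids

padding_idx = 1  # assumption: default XLMRobertaXLConfig.pad_token_id
input_ids = torch.tensor([[12, 31, 13, padding_idx]])
position_ids = create_position_ids_from_input_ids(input_ids, padding_idx)
print(position_ids)  # tensor([[2, 3, 4, 1]]): sequential ids starting at padding_idx + 1, pad stays at padding_idx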
coding utf 8 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the
license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license self key_len seq_length mem_len perm_mask 1 1 0 previous tokens don t see last token target_mapping 0 1 1 0 predict last token token_type_ids token_type_ids todo pvp check other models whether language generation is also applicable note that tfxlnetmodeltest is not a subclass of generationtestermixin so no contrastive generation tests from there is run against tfxlnetmodel todo fix the failed tests exception encountered when calling layer overwrite since tfxlnetlmheadmodel doesn t cut logits labels the number of elements in the loss should be the same as the number of elements in the label tfxlnetlmheadmodel doesn t cut logits labels if model __class__ in get_values tf_model_for_causal_lm_mapping if loss is causal lm loss labels are shift so that one label per batch is cut loss_size loss_size self model_tester batch_size test that model correctly compute the loss with kwargs test that model correctly compute the loss with a dict test that model correctly compute the loss with a tuple get keys that were added with the _prepare_for_class function create a dictionary holding the location of the tensors in the tuple initialize a list with their default values update the values and convert to a tuple send to model fmt off fmt on in 1991 the remains of russian tsar nicholas ii and his family except for alexei and maria are discovered the voice of nicholas s young son tsarevich alexei nikolaevich narrates the remainder of the story 1883 western siberia a young grigori rasputin is asked by his father and a group of men to perform magic rasputin has a vision and denounces one of the men as a horse thief although his father initially slaps him for making such an accusation rasputin watches as the man is chased outside and beaten twenty years later rasputin sees a vision of the virgin mary prompting him to become a priest rasputin quickly becomes famous with people even a bishop begging for his blessing fmt off fmt on in 1991 the remains of russian tsar nicholas ii and his family except for alexei and maria are discovered the voice of nicholas s young son tsarevich alexei nikolaevich narrates the remainder of the story 1883 western siberia a young grigori rasputin is asked by his father and a group of men to perform magic rasputin has a vision and denounces one of the men as a horse thief although his father initially slaps him for making such an accusation rasputin watches as the man is chased outside and beaten twenty years later rasputin sees a vision of the virgin mary prompting him to become a priest rasputin quickly becomes famous with people even a bishop begging for his blessing sep cls rasputin is asked to perform magic he is asked to perform a ritual of the virgin mary he is asked to perform a ritual of the virgin mary he is asked to perform
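The perm_mask / target_mapping convention described above (previous tokens don't see the last token; only the last token is predicted) can be sketched in isolation. This is an illustrative reconstruction with a tiny batch, mirroring the construction in the tester code below rather than quoting it; the batch and sequence sizes are arbitrary.

import tensorflow as tf

batch_size, seq_length = 2, 4

# perm_mask[b, i, j] = 1.0 means token i must NOT attend to token j.
# Making the entire last column 1.0 hides the final token from every position.
perm_mask = tf.concat(
    [tf.zeros((batch_size, seq_length + 1, seq_length)),
     tf.ones((batch_size, seq_length + 1, 1))],
    axis=-1,
)  # shape (batch, seq_len + 1, seq_len + 1)

# target_mapping[b, k, j] = 1.0 selects token j as the k-th prediction target.
# A single row that is 1.0 only in the last column means "predict the last token".
target_mapping = tf.concat(
    [tf.zeros((batch_size, 1, seq_length)),
     tf.ones((batch_size, 1, 1))],
    axis=-1,
)  # shape (batch, 1, seq_len + 1)

print(perm_mask.shape, target_mapping.shape)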
from __future__ import annotations import inspect import random import unittest from transformers import XLNetConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xlnet.modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetModel, ) class TFXLNetModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.mem_len = 10 self.clamp_len = -1 self.reuse_len = 15 self.is_training = True self.use_labels = True self.vocab_size = 99 self.cutoffs = [10, 50, 80] self.hidden_size = 32 self.num_attention_heads = 4 self.d_inner = 128 self.num_hidden_layers = 2 self.type_sequence_label_size = 2 self.untie_r = True self.bi_data = False self.same_length = False self.initializer_range = 0.05 self.seed = 1 self.type_vocab_size = 2 self.bos_token_id = 1 self.eos_token_id = 2 self.pad_token_id = 5 self.num_choices = 4 def prepare_config_and_inputs(self): input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size) perm_mask = tf.zeros((self.batch_size, self.seq_length + 1, self.seq_length), dtype=tf.float32) perm_mask_last = tf.ones((self.batch_size, self.seq_length + 1, 1), dtype=tf.float32) perm_mask = tf.concat([perm_mask, perm_mask_last], axis=-1) target_mapping = tf.zeros((self.batch_size, 1, self.seq_length), dtype=tf.float32) target_mapping_last = tf.ones((self.batch_size, 1, 1), dtype=tf.float32) target_mapping = tf.concat([target_mapping, target_mapping_last], axis=-1) sequence_labels = None lm_labels = None is_impossible_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) config = XLNetConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, n_head=self.num_attention_heads, d_inner=self.d_inner, n_layer=self.num_hidden_layers, untie_r=self.untie_r, mem_len=self.mem_len, clamp_len=self.clamp_len, same_length=self.same_length, reuse_len=self.reuse_len, bi_data=self.bi_data, initializer_range=self.initializer_range, num_labels=self.type_sequence_label_size, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, ) return ( config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ) def set_seed(self): random.seed(self.seed) tf.random.set_seed(self.seed) def create_and_check_xlnet_base_model( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): model = TFXLNetModel(config) inputs = {"input_ids": input_ids_1, 
"input_mask": input_mask, "token_type_ids": segment_ids} result = model(inputs) inputs = [input_ids_1, input_mask] result = model(inputs) config.use_mems_eval = False model = TFXLNetModel(config) no_mems_outputs = model(inputs) self.parent.assertEqual(len(no_mems_outputs), 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_lm_head( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): model = TFXLNetLMHeadModel(config) inputs_1 = {"input_ids": input_ids_1, "token_type_ids": segment_ids} all_logits_1, mems_1 = model(inputs_1).to_tuple() inputs_2 = {"input_ids": input_ids_2, "mems": mems_1, "token_type_ids": segment_ids} all_logits_2, mems_2 = model(inputs_2).to_tuple() inputs_3 = {"input_ids": input_ids_q, "perm_mask": perm_mask, "target_mapping": target_mapping} logits, _ = model(inputs_3).to_tuple() self.parent.assertEqual(all_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_1], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertEqual(all_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_qa( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): model = TFXLNetForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids_1, "attention_mask": input_mask, "token_type_ids": segment_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_sequence_classif( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): model = TFXLNetForSequenceClassification(config) result = model(input_ids_1) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_for_token_classification( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): config.num_labels = input_ids_1.shape[1] model = TFXLNetForTokenClassification(config) inputs = { "input_ids": input_ids_1, "attention_mask": input_mask, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_for_multiple_choice( self, config, input_ids_1, 
input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ): config.num_choices = self.num_choices model = TFXLNetForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids_1, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(segment_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size * self.num_choices, self.hidden_size)] * self.num_hidden_layers, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids_1} return config, inputs_dict @require_tf class TFXLNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, TFXLNetForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFXLNetLMHeadModel,) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFXLNetModel, "question-answering": TFXLNetForQuestionAnsweringSimple, "text-classification": TFXLNetForSequenceClassification, "text-generation": TFXLNetLMHeadModel, "token-classification": TFXLNetForTokenClassification, "zero-shot": TFXLNetForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False @unittest.skip("XLNet has special cache mechanism and is currently not working with contrastive generation") def test_xla_generate_contrastive(self): super().test_xla_generate_contrastive() def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = TFXLNetModelTester(self) self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37) def test_config(self): self.config_tester.run_common_tests() def test_xlnet_base_model(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs) def test_xlnet_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) def test_xlnet_sequence_classif(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs) def test_xlnet_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_for_token_classification(*config_and_inputs) def test_xlnet_qa(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlnet_qa(*config_and_inputs) def test_xlnet_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXLNetModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Some of the XLNet models misbehave with flexible input shapes.") def test_compile_tf_model(self): pass def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) if getattr(model, "hf_compute_loss", None): prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] expected_loss_size = added_label.shape.as_list()[:1] prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) input_name = "input_ids" if "input_ids" in prepared_for_class else "pixel_values" input_ids = prepared_for_class.pop(input_name) loss = model(input_ids, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) @require_tf class TFXLNetModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlnet_base_cased(self): model = TFXLNetLMHeadModel.from_pretrained("xlnet-base-cased") input_ids = tf.convert_to_tensor( [ [ 67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3, ] ], dtype=tf.int32, ) expected_output_ids = 
[ 67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3, 19, 12943, 4354, 153, 27, 442, 22, 2771, 4901, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771, ] output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
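The slow test above exercises greedy generation with TFXLNetLMHeadModel and checks that the output ids start with the prompt ids followed by the generated continuation. A hedged usage sketch of the same path outside the test harness; the prompt string and max_length here are shortened, illustrative stand-ins for the full passage used in the test.

from transformers import TFXLNetLMHeadModel, XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = TFXLNetLMHeadModel.from_pretrained("xlnet-base-cased")

# Shortened prompt for illustration; the test uses the full "In 1991, the remains..." passage.
inputs = tokenizer("In 1991, the remains of Russian Tsar Nicholas II were discovered.", return_tensors="tf")
output_ids = model.generate(inputs["input_ids"], max_length=60, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))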
coding utf 8 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license self key_len seq_length mem_len previous tokens don t see last token predict last token first forward pass create hypothetical next token and extent to next_input_ids append to next input_ids and token_type_ids causal
mask second forward pass select random slice test that outputs are equal for slice todo pvp check other models whether language generation is also applicable todo fix the failed tests xlnet has 2 qa models need to manually set the correct labels for one of them here checking that in auto regressive mode use_mems gives the same results xlnet cannot keep gradients in attentions or hidden states overwrite from test_modeling_common check hidden size every 2nd tensor is from extra stream for first item dummy pad token is appended so need one more for first item dummy pad token is appended so need one more check attn size fmt off fmt on in 1991 the remains of russian tsar nicholas ii and his family except for alexei and maria are discovered the voice of nicholas s young son tsarevich alexei nikolaevich narrates the remainder of the story 1883 western siberia a young grigori rasputin is asked by his father and a group of men to perform magic rasputin has a vision and denounces one of the men as a horse thief although his father initially slaps him for making such an accusation rasputin watches as the man is chased outside and beaten twenty years later rasputin sees a vision of the virgin mary prompting him to become a priest rasputin quickly becomes famous with people even a bishop begging for his blessing fmt off fmt on in 1991 the remains of russian tsar nicholas ii and his family except for alexei and maria are discovered the voice of nicholas s young son tsarevich alexei nikolaevich narrates the remainder of the story 1883 western siberia a young grigori rasputin is asked by his father and a group of men to perform magic rasputin has a vision and denounces one of the men as a horse thief although his father initially slaps him for making such an accusation rasputin watches as the man is chased outside and beaten twenty years later rasputin sees a vision of the virgin mary prompting him to become a priest rasputin quickly becomes famous with people even a bishop begging for his blessing sep cls rasputin is asked to perform magic he is asked to perform a ritual of the virgin mary he is asked to perform a ritual of the virgin mary he is asked to perform
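The "use_mems gives the same results" check mentioned above follows a standard cache-consistency pattern: run the prompt plus one new token in a single pass, run the same thing incrementally through mems, and compare the new token's hidden state. A compressed sketch of what create_and_check_xlnet_model_use_mems in the tester below does; the function name and the full-vector comparison (instead of a random slice) are simplifications of mine, not the tester itself.

import torch

def new_token_matches_full_pass(model, input_ids, next_tokens, atol=1e-3):
    bsz, seq_len = input_ids.shape
    # Upper-triangular perm_masks keep attention strictly left-to-right, so the
    # cached (incremental) path and the single full pass see the same context.
    causal_short = torch.triu(torch.ones(bsz, seq_len, seq_len), diagonal=0)
    causal_long = torch.triu(torch.ones(bsz, seq_len + 1, seq_len + 1), diagonal=0)
    single = torch.ones(bsz, 1, 1)

    cached = model(input_ids, use_mems=True, perm_mask=causal_short)
    step = model(next_tokens, mems=cached.mems, perm_mask=single)
    full = model(torch.cat([input_ids, next_tokens], dim=-1), perm_mask=causal_long)
    # The freshly generated position must agree between the two paths.
    return torch.allclose(step.last_hidden_state[:, 0], full.last_hidden_state[:, -1], atol=atol)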
import random import unittest from transformers import XLNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.xlnet.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST class XLNetModelTester: def __init__( self, parent, batch_size=14, seq_length=7, mem_len=10, clamp_len=-1, reuse_len=15, is_training=True, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, num_attention_heads=4, d_inner=128, num_hidden_layers=2, type_sequence_label_size=2, untie_r=True, bi_data=False, same_length=False, initializer_range=0.05, seed=1, type_vocab_size=2, bos_token_id=1, eos_token_id=2, pad_token_id=5, num_choices=4, ): self.parent = parent self.batch_size = 14 self.seq_length = 7 self.mem_len = 10 self.clamp_len = -1 self.reuse_len = 15 self.is_training = True self.use_labels = True self.vocab_size = 99 self.cutoffs = [10, 50, 80] self.hidden_size = 32 self.num_attention_heads = 4 self.d_inner = 128 self.num_hidden_layers = 5 self.type_sequence_label_size = 2 self.untie_r = True self.bi_data = False self.same_length = False self.initializer_range = 0.05 self.seed = 1 self.type_vocab_size = 2 self.bos_token_id = 1 self.eos_token_id = 2 self.pad_token_id = 5 self.num_choices = 4 def prepare_config_and_inputs(self): input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size) perm_mask = torch.zeros( self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float, device=torch_device, ) perm_mask[:, :, -1] = 1.0 target_mapping = torch.zeros( self.batch_size, 1, self.seq_length + 1, dtype=torch.float, device=torch_device, ) target_mapping[:, 0, -1] = 1.0 sequence_labels = None lm_labels = None is_impossible_labels = None token_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) is_impossible_labels = ids_tensor([self.batch_size], 2).float() token_labels = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = self.get_config() return ( config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ) def get_config(self): return XLNetConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, n_head=self.num_attention_heads, d_inner=self.d_inner, n_layer=self.num_hidden_layers, untie_r=self.untie_r, mem_len=self.mem_len, clamp_len=self.clamp_len, same_length=self.same_length, reuse_len=self.reuse_len, bi_data=self.bi_data, initializer_range=self.initializer_range, num_labels=self.type_sequence_label_size, 
bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, ) def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) def create_and_check_xlnet_base_model( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetModel(config) model.to(torch_device) model.eval() result = model(input_ids_1, input_mask=input_mask) result = model(input_ids_1, attention_mask=input_mask) result = model(input_ids_1, token_type_ids=segment_ids) result = model(input_ids_1) config.mem_len = 0 model = XLNetModel(config) model.to(torch_device) model.eval() base_model_output = model(input_ids_1) self.parent.assertEqual(len(base_model_output), 2) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_use_mems_train( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetForSequenceClassification(config) model.to(torch_device) model.train() train_size = input_ids_1.shape[0] batch_size = 4 for i in range(train_size // batch_size + 1): input_ids = input_ids_1[i : (i + 1) * batch_size] labels = sequence_labels[i : (i + 1) * batch_size] outputs = model(input_ids=input_ids, labels=labels, return_dict=True) self.parent.assertIsNone(outputs.mems) self.parent.assertIsNotNone(outputs.loss) def create_and_check_xlnet_model_use_mems( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetModel(config=config) model.to(torch_device) model.eval() causal_mask = torch.ones( input_ids_1.shape[0], input_ids_1.shape[1], input_ids_1.shape[1], dtype=torch.float, device=torch_device, ) causal_mask = torch.triu(causal_mask, diagonal=0) outputs_cache = model(input_ids_1, use_mems=True, perm_mask=causal_mask) outputs_no_cache = model(input_ids_1, use_mems=False, perm_mask=causal_mask) outputs_conf = model(input_ids_1) self.parent.assertTrue(len(outputs_cache) == len(outputs_conf)) self.parent.assertTrue(len(outputs_cache) == len(outputs_no_cache) + 1) output, mems = outputs_cache.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids_1, next_tokens], dim=-1) causal_mask = torch.ones( input_ids_1.shape[0], input_ids_1.shape[1] + 1, input_ids_1.shape[1] + 1, dtype=torch.float, device=torch_device, ) causal_mask = torch.triu(causal_mask, diagonal=0) single_mask = torch.ones(input_ids_1.shape[0], 1, 1, dtype=torch.float, device=torch_device) output_from_no_past = model(next_input_ids, perm_mask=causal_mask)["last_hidden_state"] output_from_past = model(next_tokens, mems=mems, perm_mask=single_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xlnet_base_model_with_att_output( self, config, input_ids_1, input_ids_2, 
input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetModel(config) model.to(torch_device) model.eval() attentions = model(input_ids_1, target_mapping=target_mapping, output_attentions=True)["attentions"] self.parent.assertEqual(len(attentions), config.n_layer) self.parent.assertIsInstance(attentions[0], tuple) self.parent.assertEqual(len(attentions[0]), 2) self.parent.assertTrue(attentions[0][0].shape, attentions[0][0].shape) def create_and_check_xlnet_lm_head( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetLMHeadModel(config) model.to(torch_device) model.eval() result1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels) result2 = model(input_ids_2, token_type_ids=segment_ids, labels=lm_labels, mems=result1.mems) _ = model(input_ids_q, perm_mask=perm_mask, target_mapping=target_mapping) self.parent.assertEqual(result1.loss.shape, ()) self.parent.assertEqual(result1.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result1.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertEqual(result2.loss.shape, ()) self.parent.assertEqual(result2.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result2.mems], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_qa( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids_1) result_with_labels = model( input_ids_1, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids_1, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) total_loss, mems = result_with_labels.to_tuple() result_with_labels = model( input_ids_1, start_positions=sequence_labels, end_positions=sequence_labels, ) total_loss, mems = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_token_classif( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetForTokenClassification(config) model.to(torch_device) model.eval() result = 
model(input_ids_1) result = model(input_ids_1, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.type_sequence_label_size)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_xlnet_sequence_classif( self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ): model = XLNetForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids_1) result = model(input_ids_1, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) self.parent.assertListEqual( [mem.shape for mem in result.mems], [(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids_1} return config, inputs_dict @require_torch class XLNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLNetModel, XLNetLMHeadModel, XLNetForTokenClassification, XLNetForSequenceClassification, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForMultipleChoice, ) if is_torch_available() else () ) all_generative_model_classes = ( (XLNetLMHeadModel,) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": XLNetModel, "question-answering": XLNetForQuestionAnsweringSimple, "text-classification": XLNetForSequenceClassification, "text-generation": XLNetLMHeadModel, "token-classification": XLNetForTokenClassification, "zero-shot": XLNetForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "XLNetForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = XLNetModelTester(self) self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37) def test_config(self): self.config_tester.run_common_tests() def test_xlnet_base_model(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs) def test_xlnet_base_model_use_mems(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlnet_model_use_mems(*config_and_inputs) def test_seq_classification_use_mems_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_use_mems_train(*config_and_inputs) def test_xlnet_base_model_with_att_output(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_base_model_with_att_output(*config_and_inputs) def test_xlnet_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) def test_xlnet_sequence_classif(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs) def test_xlnet_token_classif(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_token_classif(*config_and_inputs) def test_xlnet_qa(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_qa(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["q", "k", "v", "o", "r", "r_r_bias", "r_s_bias", "r_w_bias", "seg_embed", "mask_emb"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): for i, layer_hidden_states in enumerate(iter_hidden_states): if i % 2 != 0: seq_len = 1 else: seq_len = (min_length + 1) if idx == 0 else min_length expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) self.assertEqual(layer_hidden_states.shape, expected_shape) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, attentions_item in enumerate(attentions): for iter_attentions in attentions_item: tgt_len = min_length if idx == 0: tgt_len += 1 src_len = min_length + idx + 1 expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions), ) @slow def test_model_from_pretrained(self): for model_name in XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XLNetModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class XLNetModelLanguageGenerationTest(unittest.TestCase): @slow 
def test_lm_generate_xlnet_base_cased(self): model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased") model.to(torch_device) input_ids = torch.tensor( [ [ 67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3, ] ], dtype=torch.long, device=torch_device, ) expected_output_ids = [ 67, 2840, 19, 18, 1484, 20, 965, 29077, 8719, 1273, 21, 45, 273, 17, 10, 15048, 28, 27511, 21, 4185, 11, 41, 2444, 9, 32, 1025, 20, 8719, 26, 23, 673, 966, 19, 29077, 20643, 27511, 20822, 20643, 19, 17, 6616, 17511, 18, 8978, 20, 18, 777, 9, 19233, 1527, 17669, 19, 24, 673, 17, 28756, 150, 12943, 4354, 153, 27, 442, 37, 45, 668, 21, 24, 256, 20, 416, 22, 2771, 4901, 9, 12943, 4354, 153, 51, 24, 3004, 21, 28142, 23, 65, 20, 18, 416, 34, 24, 2958, 22947, 9, 1177, 45, 668, 3097, 13768, 23, 103, 28, 441, 148, 48, 20522, 19, 12943, 4354, 153, 12860, 34, 18, 326, 27, 17492, 684, 21, 6709, 9, 8585, 123, 266, 19, 12943, 4354, 153, 6872, 24, 3004, 20, 18, 9225, 2198, 19, 12717, 103, 22, 401, 24, 6348, 9, 12943, 4354, 153, 1068, 2768, 2286, 19, 33, 104, 19, 176, 24, 9313, 19, 20086, 28, 45, 10292, 9, 4, 3, 19, 12943, 4354, 153, 27, 442, 22, 2771, 4901, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771, 24, 11335, 20, 18, 9225, 2198, 9, 69, 27, 442, 22, 2771, ] output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
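The PyTorch slow test mirrors the TF one above and relies on generate returning the prompt followed by the new tokens, which is why expected_output_ids begins with the full input sequence. A hedged interactive sketch that slices off the prompt to inspect only the continuation; the shortened prompt and max_length are illustrative, not the test's values.

import torch
from transformers import XLNetLMHeadModel

model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased").eval()
prompt_ids = torch.tensor([[67, 2840, 19, 18, 1484, 20, 965, 29077]])  # shortened prompt, illustration only
with torch.no_grad():
    output_ids = model.generate(prompt_ids, max_length=40, do_sample=False)
# Greedy generate returns prompt + new tokens, so the continuation starts after the prompt length.
continuation = output_ids[0, prompt_ids.shape[1]:].tolist()
print(continuation)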
coding utf 8 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license we have a sentencepiece fixture for testing test _convert_token_to_id and _convert_id_to_token fmt skip
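The tokenization tests below lean on SentencePiece's "▁" marker (SPIECE_UNDERLINE), which flags tokens that begin a new word; joining the pieces and turning "▁" back into spaces recovers the surface text. A tiny self-contained illustration using the same token sequence the test expects for "This is a test".

SPIECE_UNDERLINE = "▁"

tokens = ["▁This", "▁is", "▁a", "▁t", "est"]  # expected pieces for "This is a test"
text = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
assert text == "This is a test"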
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLNetTokenizer rust_tokenizer_class = XLNetTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "<eod>") self.assertEqual(len(vocab_keys), 1_006) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_full_tokenizer(self): tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) def test_tokenizer_lower(self): tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ], ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"]) def test_tokenizer_no_lower(self): tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ], ) @slow def 
test_sequence_builders(self): tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_2 + [4, 3] @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511", )
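The sequence-builder assertions above rely on XLNet appending, rather than prepending, its special tokens. A minimal sketch of that layout, assuming ids 4 and 3 stand for the <sep> and <cls> tokens as asserted in test_sequence_builders (they are hard-coded here for illustration, not read from a checkpoint):

SEP_ID, CLS_ID = 4, 3  # assumed <sep>/<cls> ids, mirroring the values asserted above

def build_inputs_sketch(token_ids_0, token_ids_1=None):
    # XLNet places special tokens at the end: A <sep> <cls> or A <sep> B <sep> <cls>
    if token_ids_1 is None:
        return token_ids_0 + [SEP_ID, CLS_ID]
    return token_ids_0 + [SEP_ID] + token_ids_1 + [SEP_ID, CLS_ID]

assert build_inputs_sketch([10, 11]) == [10, 11, 4, 3]
assert build_inputs_sketch([10, 11], [12]) == [10, 11, 4, 12, 4, 3]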
Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License, a copy of which is available at http://www.apache.org/licenses/LICENSE-2.0. The software is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Comments from the X-MOD model tests: make sure the ids don't start with a pad token; run a first forward pass; create hypothetical multiple next tokens and extend them to next_input_ids; append to the next input_ids, select a random slice, and test that the outputs are equal for that slice (TODO: fix the failed tests). The position-id regression test (https://github.com/huggingface/transformers/issues/1761, originally failing with PyTorch 1.3) ensures that the default position ids are assigned sequentially and are masked with the embedding object's padding index, so the first available non-padding position index is XmodEmbeddings.padding_idx + 1. The integration tests check the output shape (batch_size, sequence_length, embedding_vector_dim) and compare the actual values for a slice of the last dim, both for the en_XX input "The dog is cute and lives in the garden house" and for the de_DE input "Der Hund ist niedlich und wohnt in einem Gartenhaus" (the same sentence in German).
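The padding-index rule described above (sequential positions starting at padding_idx + 1, with padded positions frozen at padding_idx) can be reproduced with a few tensor operations. This is a hedged re-implementation sketch, not the library's create_position_ids_from_input_ids itself, written against the expectation the regression test below asserts:

import torch

def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Non-padding tokens get sequential positions starting at padding_idx + 1;
    # padding tokens keep padding_idx, so they never collide with real positions.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

# With padding_idx = 1 this matches the regression test: [[12, 31, 13, 1]] -> [[2, 3, 4, 1]]
print(position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))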
import unittest from transformers import XLMRobertaTokenizer, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XmodConfig, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, ) from transformers.models.xmod.modeling_xmod import XmodEmbeddings, create_position_ids_from_input_ids class XmodModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return XmodConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, default_language="en_XX", ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XmodModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = XmodModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = XmodForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = XmodForCausalLM(config=config).to(torch_device).eval() mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, 
attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XmodForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = XmodForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = XmodForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = XmodForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XmodModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XmodForCausalLM, XmodForMaskedLM, XmodModel, XmodForSequenceClassification, XmodForTokenClassification, XmodForMultipleChoice, XmodForQuestionAnswering, ) if is_torch_available() else () ) 
all_generative_model_classes = (XmodForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": XmodModel, "fill-mask": XmodForMaskedLM, "question-answering": XmodForQuestionAnswering, "text-classification": XmodForSequenceClassification, "text-generation": XmodForCausalLM, "token-classification": XmodForTokenClassification, "zero-shot": XmodForSequenceClassification, } if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def setUp(self): self.model_tester = XmodModelTester(self) self.config_tester = ConfigTester(self, config_class=XmodConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_create_position_ids_respects_padding_index(self): config = self.model_tester.prepare_config_and_inputs()[0] model = XmodEmbeddings(config=config) input_ids = 
torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): config = self.model_tester.prepare_config_and_inputs()[0] embeddings = XmodEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_set_default_language(self): config = self.model_tester.prepare_config_and_inputs()[0] model = XmodForMaskedLM(config=config) model.set_default_language("en_XX") self.assertEqual(model.config.default_language, "en_XX") with self.assertRaises(ValueError): model.set_default_language("xx_XX") def test_freeze_embeddings_and_language_adapters(self): config = self.model_tester.prepare_config_and_inputs()[0] model = XmodForMaskedLM(config=config) num_trainable_params_before = sum(p.numel() for p in model.parameters() if p.requires_grad) model.freeze_embeddings_and_language_adapters() num_trainable_params_after = sum(p.numel() for p in model.parameters() if p.requires_grad) self.assertLess(num_trainable_params_after, num_trainable_params_before) @require_sentencepiece @require_tokenizers @require_torch class XmodModelIntegrationTest(unittest.TestCase): @slow def test_xmod_base(self): model = XmodModel.from_pretrained("facebook/xmod-base") model.set_default_language("en_XX") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) expected_output_shape = torch.Size((1, 12, 768)) expected_output_values_last_dim = torch.tensor( [[-0.2394, -0.0036, 0.1252, -0.0087, 0.1325, 0.0580, -0.2049, -0.1978, -0.1223, 0.0648, -0.2599, -0.3724]] ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) model.set_default_language("de_DE") input_ids = torch.tensor([[0, 1310, 49083, 443, 269, 71, 5486, 165, 60429, 660, 23, 2315, 58761, 18391, 5, 2]]) expected_output_shape = torch.Size((1, 16, 768)) expected_output_values_last_dim = torch.tensor( [[0.0162, 0.0075, -0.1882, 0.2335, -0.0952, -0.3994, -0.0317, -0.1174, 0.0177, 0.4280, -0.0240, -0.2138, 0.0785, -0.1045, -0.2811, -0.3220]] ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @slow def test_xmod_large_prenorm(self): model = XmodModel.from_pretrained("facebook/xmod-large-prenorm") model.set_default_language("en_XX") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) expected_output_shape = torch.Size((1, 12, 1024)) expected_output_values_last_dim = torch.tensor( [[-0.0121, -0.0194, -0.0240, -0.0160, -0.0205, -0.0159, -0.0243, 
-0.0206, -0.0161, -0.0335, -0.0196, -0.0141]] ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) model.set_default_language("de_DE") input_ids = torch.tensor([[0, 1310, 49083, 443, 269, 71, 5486, 165, 60429, 660, 23, 2315, 58761, 18391, 5, 2]]) expected_output_shape = torch.Size((1, 16, 1024)) expected_output_values_last_dim = torch.tensor( [[-0.0120, -0.0262, -0.0253, -0.0112, -0.0128, -0.0164, -0.0080, -0.0081, -0.0192, -0.0117, -0.0170, -0.0120, -0.0210, -0.0173, -0.0078, -0.0122]] ) output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @slow def test_multilingual_batch(self): model = XmodModel.from_pretrained("facebook/xmod-base") input_ids = torch.tensor([ [0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2], [0, 1310, 49083, 443, 269, 71, 5486, 165, 60429, 660, 23, 2], [0, 1310, 49083, 443, 269, 71, 5486, 165, 60429, 660, 23, 2], [0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2], ]) lang_ids = torch.LongTensor([0, 8, 8, 0]) expected_output_shape = torch.Size((4, 12, 768)) expected_output_values_last_dim = torch.tensor([ [-0.2394, -0.0036, 0.1252, -0.0087, 0.1325, 0.0580, -0.2049, -0.1978, -0.1223, 0.0648, -0.2599, -0.3724], [-0.2668, -0.0235, -0.1739, 0.2266, -0.0901, -0.3482, 0.0105, -0.1915, 0.0397, 0.3822, 0.1836, -0.3407], [-0.2668, -0.0235, -0.1739, 0.2266, -0.0901, -0.3482, 0.0105, -0.1915, 0.0397, 0.3822, 0.1836, -0.3407], [-0.2394, -0.0036, 0.1252, -0.0087, 0.1325, 0.0580, -0.2049, -0.1978, -0.1223, 0.0648, -0.2599, -0.3724], ]) output = model(input_ids, lang_ids=lang_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) @slow def test_end_to_end_mask_fill(self): tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base") model = XmodForMaskedLM.from_pretrained("facebook/xmod-base", default_language="en_XX") model.to(torch_device) sentences = [ "Hello, my dog is a little <mask>.", "Hi <mask>!", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) probs = outputs.logits.softmax(dim=-1) _, predictions = probs.topk(1) predictions = predictions.squeeze(-1) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model(input_ids=inputs_non_padded) probs_non_padded = output_non_padded.logits.softmax(dim=-1) _, predictions_non_padded = probs_non_padded.topk(1) predictions_non_padded = predictions_non_padded.squeeze(-1) inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model(input_ids=inputs_padded) probs_padded = output_padded.logits.softmax(dim=-1) _, predictions_padded = probs_padded.topk(1) predictions_padded = predictions_padded.squeeze(-1) batch_out_sentence = tokenizer.batch_decode(predictions, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(predictions_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(predictions_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little 
girl.", "Hi everyone!", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
Copyright 2021 HuggingFace Inc. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License, a copy of which is available at http://www.apache.org/licenses/LICENSE-2.0. The software is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Comments from the YOLOS image processing tests: by setting size["longest_edge"] = max_resolution we're effectively not testing the longest-edge cap :p. get_expected_values computes the expected height and width when providing images to YolosImageProcessor, assuming do_resize is set to True with a scalar size. The padding test initializes two image processors, creates random PyTorch tensors, and checks that the pad method and calling the image processor return the same tensors. The slow COCO tests prepare an image and target (plus a masks_path for the panoptic case), encode them, and verify the pixel values, area, boxes, image_id, is_crowd, class_labels, masks, orig_size and size.
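The expected-size computation mentioned above boils down to an aspect-ratio-preserving resize of the shorter edge; a minimal standalone sketch of that rule, which deliberately ignores the longest_edge cap exactly like the test helper below:

def expected_resized_size(height: int, width: int, shortest_edge: int = 18):
    # Scale the image so its shorter side equals shortest_edge, preserving aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# e.g. a 30 x 60 (height x width) image with shortest_edge=18 resizes to (18, 36)
print(expected_resized_size(30, 60))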
import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class YolosImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class YolosImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = YolosImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = YolosImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) 
self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) def test_equivalence_padding(self): image_processing_1 = self.image_processing_class(**self.image_processor_dict) image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) ) @slow def test_call_pytorch_with_coco_detection_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small") encoding = image_processing(images=image, annotations=target, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = 
pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") image_processing = YolosImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License, a copy of which is available at http://www.apache.org/licenses/LICENSE-2.0. The software is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch YOLOS model. Comments from the suite: the expected sequence length used in several tests is num_patches + 1 + num_detection_tokens (the extra 1 accounts for the [CLS] token), with num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size); labels is a list of dicts, each dict holding the labels for one example in the batch. Some tests from test_modeling_common.py are overwritten because YOLOS does not use input_ids, inputs_embeds, attention_mask or seq_length, and there is a special case for the head model when preparing labels. In YOLOS the seq_len differs from the usual text models; the attention and hidden-state tests check that output_attentions and output_hidden_states also work when set through the config, and that attention is always last and the order is fine. The integration test verifies results on an image of cute cats: a forward pass, the output logits and boxes, and the post-processing.
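The sequence-length bookkeeping described above can be spelled out directly; a small sketch using the tester's toy configuration from the code below (30x30 images, patch size 2, 10 detection tokens):

image_size = (30, 30)
patch_size = 2
num_detection_tokens = 10

# One token per image patch, plus the [CLS] token, plus the learned detection tokens.
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)  # 15 * 15 = 225
expected_seq_len = num_patches + 1 + num_detection_tokens                    # 236
print(expected_seq_len)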
import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class YolosModelTester: def __init__( self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.n_targets = n_targets self.num_detection_tokens = num_detection_tokens num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size) self.expected_seq_len = num_patches + 1 + self.num_detection_tokens def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) labels = None if self.use_labels: labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, labels def get_config(self): return YolosConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = YolosModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) ) def create_and_check_for_object_detection(self, config, pixel_values, labels): model = YolosForObjectDetection(config) model.to(torch_device) model.eval() result = 
model(pixel_values=pixel_values) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) result = model(pixel_values=pixel_values, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "YolosForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = YolosModelTester(self) self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = self.model_tester.expected_seq_len for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_object_detection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = YolosModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class YolosModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None @slow def test_inference_object_detection_head(self): model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(inputs.pixel_values) expected_shape = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice_logits = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=torch_device, ) expected_slice_boxes = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device) expected_labels = [75, 75, 17, 63, 17] 
expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
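# Usage sketch, not part of the test suite above: object detection inference with the
# "hustvl/yolos-small" checkpoint and the COCO fixture image, mirroring the integration test.
# (model.config.id2label is the standard transformers label mapping and is assumed here;
# it is not exercised by the test itself.)
from PIL import Image
import torch
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Turn raw logits / normalized boxes into (score, label, box) predictions above a threshold
results = image_processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score.item():.2f} at {box.tolist()}")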
# Testing suite for the PyTorch YOSO model.
import unittest from transformers import YosoConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, YosoModel, ) from transformers.models.yoso.modeling_yoso import YOSO_PRETRAINED_MODEL_ARCHIVE_LIST class YosoModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return YosoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() 
config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = YosoModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = YosoForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = YosoForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def 
create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = YosoForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class YosoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( YosoModel, YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": YosoModel, "fill-mask": YosoForMaskedLM, "question-answering": YosoForQuestionAnswering, "text-classification": YosoForSequenceClassification, "token-classification": YosoForTokenClassification, "zero-shot": YosoForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = YosoModelTester(self) self.config_tester = ConfigTester(self, config_class=YosoConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in YOSO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
YosoModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): return @require_torch class YosoModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = YosoModel.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0611, 0.1242, 0.0840], [0.0280, -0.0048, 0.1125], [0.0106, 0.0226, 0.0751]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.1313, -3.7285, -2.2407], [-2.7047, -3.3314, -2.6408], [0.0629, -2.5166, -0.3356]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_masked_lm_long_input(self): model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096") input_ids = torch.arange(4096).unsqueeze(0) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 50265 expected_shape = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.3914, -4.3742, -5.0956], [-4.0988, -4.2384, -7.0406], [-3.1427, -3.7192, -6.6800]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
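# Usage sketch, not part of the test suite above: extracting hidden states from the pretrained
# "uw-madison/yoso-4096" checkpoint with a toy input, as the integration test does.
import torch
from transformers import YosoModel

yoso = YosoModel.from_pretrained("uw-madison/yoso-4096")
toy_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
    last_hidden_state = yoso(toy_input_ids).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, 6, 768])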
# Optimization tests: the AdamW / Adafactor checks below use no warmup, a constant schedule and
# no gradient clipping; plain tensors have no zero_grad(), so gradients are cleared manually.
# The scheduler table maps each schedule function to (sched_args_dict, expected_learning_rates).
# LambdaScheduleWrapper wraps the lr lambdas to test picklability of the schedule,
# see https://github.com/huggingface/transformers/issues/21689
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def unwrap_schedule(scheduler, num_steps=10): lrs = [] for _ in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() return lrs def unwrap_and_save_reload_schedule(scheduler, num_steps=10): lrs = [] for step in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: file_name = os.path.join(tmpdirname, "schedule.bin") torch.save(scheduler.state_dict(), file_name) state_dict = torch.load(file_name) scheduler.load_state_dict(state_dict) return lrs @require_torch class OptimizationTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def test_adam_w(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0) for _ in range(100): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) def test_adafactor(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() optimizer = Adafactor( params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, ) for _ in range(1000): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) @require_torch class ScheduleInitTest(unittest.TestCase): m = nn.Linear(50, 50) if is_torch_available() else None optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None num_steps = 10 def assertListAlmostEqual(self, list1, list2, tol, msg=None): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol, msg=msg) def test_schedulers(self): common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10} scheds = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in 
scheds.items(): kwargs, expected_learning_rates = data scheduler = scheduler_func(self.optimizer, **kwargs) self.assertEqual(len([scheduler.get_lr()[0]]), 1) lrs_1 = unwrap_schedule(scheduler, self.num_steps) self.assertListAlmostEqual( lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", ) scheduler = scheduler_func(self.optimizer, **kwargs) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(scheduler) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload") class LambdaScheduleWrapper: def __init__(self, fn): self.fn = fn def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) @classmethod def wrap_scheduler(self, scheduler): scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
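# Usage sketch, not part of the test suite above: a typical training loop pairing AdamW with
# get_linear_schedule_with_warmup, the pattern the scheduler expectations above are checking.
# The toy model and loss are illustrative only.
import torch
from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

toy_model = nn.Linear(50, 2)
optimizer = AdamW(toy_model.parameters(), lr=5e-5)
num_training_steps = 100
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10, num_training_steps=num_training_steps
)
for _ in range(num_training_steps):
    loss = toy_model(torch.randn(8, 50)).pow(2).mean()  # dummy loss
    loss.backward()
    optimizer.step()
    scheduler.step()  # lr warms up linearly for 10 steps, then decays linearly towards 0
    optimizer.zero_grad()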
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class OptimizationFTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def testGradientAccumulator(self): accumulator = GradientAccumulator() accumulator([tf.constant([1.0, 2.0])]) accumulator([tf.constant([-2.0, 1.0])]) accumulator([tf.constant([-1.0, 2.0])]) with self.assertRaises(ValueError): accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])]) self.assertEqual(accumulator.step, 3) self.assertEqual(len(accumulator.gradients), 1) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2) accumulator.reset() self.assertEqual(accumulator.step, 0) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2) def testGradientAccumulatorDistributionStrategy(self): context._context = None ops.enable_eager_execution_internal() physical_devices = tf.config.list_physical_devices("CPU") if len(physical_devices) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) devices = tf.config.list_logical_devices(device_type="CPU") strategy = tf.distribute.MirroredStrategy(devices=devices[:2]) with strategy.scope(): accumulator = GradientAccumulator() variable = tf.Variable([4.0, 3.0]) optimizer, _ = create_optimizer(5e-5, 10, 5) gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False) def accumulate_on_replica(gradient): accumulator([gradient]) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable]))) @tf.function def accumulate(grad1, grad2): with strategy.scope(): local_variables = strategy.experimental_local_results(gradient_placeholder) local_variables[0].assign(grad1) local_variables[1].assign(grad2) strategy.run(accumulate_on_replica, args=(gradient_placeholder,)) @tf.function def apply_grad(): with strategy.scope(): strategy.run(apply_on_replica) def _check_local_values(grad1, grad2): values = strategy.experimental_local_results(accumulator._gradients[0]) self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2) self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2) accumulate([1.0, 2.0], [-1.0, 1.0]) accumulate([3.0, -1.0], [-1.0, -1.0]) accumulate([-2.0, 2.0], [3.0, -2.0]) self.assertEqual(accumulator.step, 3) _check_local_values([2.0, 3.0], [1.0, -2.0]) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2) accumulator.reset() self.assertEqual(accumulator.step, 0) _check_local_values([0.0, 0.0], [0.0, 0.0])
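# Usage sketch, not part of the test suite above: accumulating gradients over micro-batches with
# GradientAccumulator and applying them with the optimizer returned by create_optimizer, using
# the same positional arguments (init lr, num train steps, num warmup steps) as the test.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

variable = tf.Variable([1.0, 2.0])
optimizer, lr_schedule = create_optimizer(5e-5, 10, 2)
accumulator = GradientAccumulator()
for micro_batch_grad in ([0.1, 0.2], [0.3, -0.1]):
    accumulator([tf.constant(micro_batch_grad)])
# Apply the accumulated (summed) gradients once, then reset before the next optimization step
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
accumulator.reset()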
# TODO: run it with CI after PEFT release.
# Testing suite that makes sure the PeftModel class is correctly integrated into the
# transformers library: loading adapters through from_pretrained (including remote folders
# that contain an adapter config and adapter weights), get_adapter_state_dict, save_pretrained
# round-trips, enable_adapters / disable_adapters, add_adapter (plain, with modules_to_save,
# with gradient checkpointing, multi-adapter), quantized 4-bit / 8-bit base models, pipelines,
# loading adapters from a state_dict, and from_pretrained hub kwargs.
import os import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, OPTForCausalLM from transformers.testing_utils import require_peft, require_torch, require_torch_gpu, slow, torch_device from transformers.utils import is_torch_available if is_torch_available(): import torch @require_peft @require_torch class PeftTesterMixin: peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",) transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",) transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM) @slow class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin): def _check_lora_correctly_converted(self, model): from peft.tuners.tuners_utils import BaseTunerLayer is_peft_loaded = False for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break return is_peft_loaded def test_peft_from_pretrained(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) self.assertTrue(peft_model._hf_peft_config_loaded) _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_state_dict(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) state_dict = peft_model.get_adapter_state_dict() for key in state_dict.keys(): self.assertTrue("lora" in key) def test_peft_save_pretrained(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("config.json" not in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) def test_peft_enable_disable_adapters(self): from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) peft_model.add_adapter(peft_config) peft_logits = peft_model(dummy_input).logits peft_model.disable_adapters() peft_logits_disabled = peft_model(dummy_input).logits peft_model.enable_adapters() peft_logits_enabled = peft_model(dummy_input).logits self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12)) 
self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12)) def test_peft_add_adapter(self): from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) _ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_add_adapter_from_pretrained(self): from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained)) def test_peft_add_adapter_modules_to_save(self): from peft import LoraConfig from peft.utils import ModulesToSaveWrapper for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"]) model.add_adapter(peft_config) self._check_lora_correctly_converted(model) _has_modules_to_save_wrapper = False for name, module in model.named_modules(): if isinstance(module, ModulesToSaveWrapper): _has_modules_to_save_wrapper = True self.assertTrue(module.modules_to_save.default.weight.requires_grad) self.assertTrue("lm_head" in name) break self.assertTrue(_has_modules_to_save_wrapper) state_dict = model.get_adapter_state_dict() self.assertTrue("lm_head.weight" in state_dict.keys()) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for _, param in model.named_parameters(): if param.requires_grad: self.assertTrue(param.grad is not None) def test_peft_add_adapter_training_gradient_checkpointing(self): from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(frozen_output.requires_grad is False) model.gradient_checkpointing_enable() non_frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(non_frozen_output.requires_grad is True) dummy_input.requires_grad = False for name, param in model.named_parameters(): if "lora" in name.lower(): self.assertTrue(param.requires_grad) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for name, param in model.named_parameters(): if param.requires_grad: self.assertTrue("lora" in name.lower()) self.assertTrue(param.grad is not None) def test_peft_add_multi_adapter(self): from peft import LoraConfig 
from peft.tuners.tuners_utils import BaseTunerLayer dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: is_peft_loaded = False model = transformers_class.from_pretrained(model_id).to(torch_device) logits_original_model = model(dummy_input).logits peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) logits_adapter_1 = model(dummy_input) model.add_adapter(peft_config, adapter_name="adapter-2") logits_adapter_2 = model(dummy_input) for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break self.assertTrue(is_peft_loaded) _ = model.generate(input_ids=dummy_input) model.set_adapter("default") self.assertTrue(model.active_adapters() == ["default"]) self.assertTrue(model.active_adapter() == "default") model.set_adapter("adapter-2") self.assertTrue(model.active_adapters() == ["adapter-2"]) self.assertTrue(model.active_adapter() == "adapter-2") self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)) model.set_adapter(["adapter-2", "default"]) self.assertTrue(model.active_adapters() == ["adapter-2", "default"]) self.assertTrue(model.active_adapter() == "adapter-2") logits_adapter_mixed = model(dummy_input) self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse( torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) @require_torch_gpu def test_peft_from_pretrained_kwargs(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) @require_torch_gpu def test_peft_save_quantized(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as 
tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) @require_torch_gpu def test_peft_save_quantized_regression(self): for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) def test_peft_pipeline(self): from transformers import pipeline for model_id in self.peft_test_model_ids: pipe = pipeline("text-generation", model_id) _ = pipe("Hello") def test_peft_add_adapter_with_state_dict(self): from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) with self.assertRaises(ValueError): model.load_adapter(peft_model_id=None) state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") dummy_state_dict = torch.load(state_dict_path) model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config) with self.assertRaises(ValueError): model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None)) self.assertTrue(self._check_lora_correctly_converted(model)) _ = model.generate(input_ids=dummy_input) def test_peft_from_pretrained_hub_kwargs(self): peft_model_id = "peft-internal-testing/tiny-opt-lora-revision" with self.assertRaises(OSError): _ = AutoModelForCausalLM.from_pretrained(peft_model_id) adapter_kwargs = {"revision": "test"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) adapter_kwargs 
= {"revision": "main", "subfolder": "test_subfolder"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model))
# Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license terms.
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class AudioClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor) audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return audio_classifier, [audio2, audio] def run_pipeline_test(self, audio_classifier, examples): audio2, audio = examples output = audio_classifier(audio) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) output = audio_classifier(audio, top_k=1) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, ], ) self.run_torchaudio(audio_classifier) @require_torchaudio def run_torchaudio(self, audio_classifier): import datasets dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio = dataset[0]["audio"]["array"] output = audio_classifier(audio) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): model = "anton-l/wav2vec2-random-tiny-classifier" audio_classifier = pipeline("audio-classification", model=model) audio = np.ones((8000,)) output = audio_classifier(audio, top_k=4) EXPECTED_OUTPUT = [ {"score": 0.0842, "label": "no"}, {"score": 0.0838, "label": "up"}, {"score": 0.0837, "label": "go"}, {"score": 0.0834, "label": "right"}, ] EXPECTED_OUTPUT_PT_2 = [ {"score": 0.0845, "label": "stop"}, {"score": 0.0844, "label": "on"}, {"score": 0.0841, "label": "right"}, {"score": 0.0834, "label": "left"}, ] self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate} output = audio_classifier(audio_dict, top_k=4) self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) @require_torch @slow def test_large_model_pt(self): import datasets model = "superb/wav2vec2-base-superb-ks" audio_classifier = pipeline("audio-classification", model=model) dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test") audio = np.array(dataset[3]["speech"], dtype=np.float32) output = audio_classifier(audio, top_k=4) self.assertEqual( nested_simplify(output, decimals=3), [ {"score": 0.981, "label": "go"}, {"score": 0.007, "label": "up"}, {"score": 0.006, "label": "_unknown_"}, {"score": 0.001, "label": "down"}, ], ) @require_tf @unittest.skip("Audio classification is not implemented for TF") def test_small_model_tf(self): pass
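

# Illustrative usage sketch mirroring the small-model test above: the audio-classification
# pipeline accepts a raw numpy waveform, or a dict carrying an explicit sampling rate.
# The model id is the one used in `test_small_model_pt`; the waveform is dummy data.
def _example_audio_classification():
    import numpy as np
    from transformers import pipeline

    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")

    waveform = np.ones((8000,), dtype=np.float32)  # one second of dummy audio at 8 kHz
    top4 = classifier(waveform, top_k=4)           # list of {"score": float, "label": str}

    # Equivalent call with the sampling rate given explicitly.
    audio = {"array": waveform, "sampling_rate": classifier.feature_extractor.sampling_rate}
    top4_again = classifier(audio, top_k=4)
    return top4, top4_again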
# Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license terms.
import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def 
test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( 
_pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import 
PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, "is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): import tensorflow as tf from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: tf.random.set_seed(0) for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: torch.manual_seed(0) self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt) gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) gc.collect() def check_default_pipeline(self, task, framework, set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: logger.debug(f"{task} in {framework} has 
no default") return auto_model_cls = relevant_auto_classes[0] if task == "translation": model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] for model_id, revision, task in zip(model_ids, revisions, tasks): try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) set_seed_fn() default_pipeline = pipeline(task, framework=framework) models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) del 
PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, 
num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-pipeline", token=self._token) repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-pipeline", token=self._token) vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) repo.push_to_hub() with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
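

# Illustrative sketch of the registration pattern used in the tests above: a custom task
# name is registered with PIPELINE_REGISTRY so that `pipeline(...)` can instantiate it.
# The task name and pipeline subclass are hypothetical; registration only affects the
# current process unless the class is also shipped as custom code on the Hub.
def _example_register_custom_pipeline():
    from transformers import AutoModelForSequenceClassification, TextClassificationPipeline, pipeline
    from transformers.pipelines import PIPELINE_REGISTRY

    class MyPairPipeline(TextClassificationPipeline):
        # A real custom pipeline would override preprocess/_forward/postprocess.
        pass

    PIPELINE_REGISTRY.register_pipeline(
        "my-pair-classification",  # hypothetical task name
        pipeline_class=MyPairPipeline,
        pt_model=AutoModelForSequenceClassification,
    )
    classifier = pipeline("my-pair-classification", model="hf-internal-testing/tiny-random-bert")
    return classifier("I love you")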
# Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license terms.
import unittest from huggingface_hub.utils import insecure_hashlib from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class DepthEstimationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, depth_estimator, examples): outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") outputs = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", dataset[0]["file"], dataset[1]["file"], dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, ], outputs, ) @require_tf @unittest.skip("Depth estimation is not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): model_id = "Intel/dpt-large" depth_estimator = pipeline("depth-estimation", model=model_id) outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") outputs["depth"] = hashimage(outputs["depth"]) self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662) @require_torch def test_small_model_pt(self): self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
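

# Illustrative usage sketch for the slow test above: the depth-estimation pipeline returns
# the raw predicted depth tensor and a PIL image rendering of it. The model id and image
# URL are the ones used in `test_large_model_pt`.
def _example_depth_estimation():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    predicted_depth = outputs["predicted_depth"]  # torch.Tensor with the per-pixel depth map
    depth_image = outputs["depth"]                # PIL.Image visualisation of the same map
    return predicted_depth.shape, depth_image.size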
# Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license terms.
# TODO: enable a small Donut model test once hf-internal-testing/tiny-random-donut is implemented.
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class Image: @staticmethod def open(*args, **kwargs): pass def load_image(_): return None INVOICE_URL = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def get_test_pipeline(self, model, tokenizer, processor): dqa_pipeline = pipeline( "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor ) image = INVOICE_URL word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) question = "What is the placebo?" examples = [ { "image": load_image(image), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def run_pipeline_test(self, dqa_pipeline, examples): outputs = dqa_pipeline(examples, top_k=2) self.assertEqual( outputs, [ [ {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, ] ] * 3, ) @require_torch @require_detectron2 @require_pytesseract def test_small_model_pt(self): dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") image = INVOICE_URL question = "How many cats are there?" expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt_chunk(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm_chunk(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) @slow @require_torch def test_large_model_pt_donut(self): dqa_pipeline = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip("Document question answering not implemented in TF") def test_small_model_tf(self): pass
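

# Illustrative usage sketch for the LayoutLM tests above: the document-question-answering
# pipeline runs OCR (via pytesseract) on the image unless `word_boxes` are supplied, then
# extracts the answer span. Model id, revision and image URL are the ones used in the tests.
def _example_document_question_answering():
    from transformers import AutoTokenizer, pipeline

    tokenizer = AutoTokenizer.from_pretrained(
        "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
    )
    dqa = pipeline(
        "document-question-answering",
        model="impira/layoutlm-document-qa",
        tokenizer=tokenizer,
        revision="3dc6de3",
    )
    return dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)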
# Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0;
# see http://www.apache.org/licenses/LICENSE-2.0 for the full license terms.
import unittest import numpy as np from transformers import ( FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, is_tf_available, is_torch_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, 
-1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, 
-0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) @require_torch def test_tokenization_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 
0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_tf def test_tokenization_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, 
-0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_shape(self, input_, shape=None): if shape is None: shape = [] if isinstance(input_, list): subshapes = [self.get_shape(in_, shape) for in_ in input_] if all(s == 0 for s in subshapes): shape.append(len(input_)) else: subshape = subshapes[0] shape = [len(input_), *subshape] elif isinstance(input_, float): return 0 else: raise ValueError("We expect lists of floats, nothing else") return shape def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None: self.skipTest("No tokenizer") return elif ( type(model.config) in FEATURE_EXTRACTOR_MAPPING or isinstance(model.config, LxmertConfig) or type(model.config) in IMAGE_PROCESSOR_MAPPING ): self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.") return elif model.config.is_encoder_decoder: self.skipTest( ) return feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor) return feature_extractor, ["This is a test", "This is another test"] def run_pipeline_test(self, feature_extractor, examples): outputs = feature_extractor("This is a test") shape = self.get_shape(outputs) self.assertEqual(shape[0], 1) 
outputs = feature_extractor(["This is a test", "Another longer test"]) shape = self.get_shape(outputs) self.assertEqual(shape[0], 2) outputs = feature_extractor("This is a test" * 100, truncation=True) shape = self.get_shape(outputs) self.assertEqual(shape[0], 1)
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license clean up as much as possible gpu memory occupied by pytorch convert model to fp16 we actually don t care about the result we just want to make sure it works meaning the float16 tensor got casted back to float32 for postprocessing no mask_token is not supported pipeline argument call argument score equivalence for some bpe tokenizers w is removed during decoding so token_str won t be the same as in targets raises with invalid for some tokenizers is actually in the vocabulary and the expected error won t raised top_k 2 ntargets 3 if we use the most probably targets and filter differently we should still have the same results for some bpe tokenizers w is removed during decoding so token_str won t be the same as in targets they should yield exactly the same result string duplicates id duplicates the target list contains duplicates so we can t output more than them
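As a rough usage sketch of the behaviour described above (checkpoint and mask token taken from the tests that follow, not an authoritative reference): the fill-mask pipeline returns dicts with sequence/score/token/token_str, top_k and targets can be passed either at construction or at call time, and a model cast to float16 on an accelerator still works because the logits are cast back to float32 for post-processing.

from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)

# Each prediction is a dict with "sequence", "score", "token" and "token_str".
print(unmasker("My name is <mask>"))

# "targets" restricts the candidates; some BPE tokenizers strip the leading space when
# decoding, so token_str may not match the target string character for character.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))

# On an accelerator, calling unmasker.model.half() and running the pipeline again should
# still return float scores, since logits are cast back to float32 before post-processing.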
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class FillMaskPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING def tearDown(self): super().tearDown() gc.collect() if is_torch_available(): backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ], ) @require_torch def test_small_model_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ], ) outputs = unmasker("My name is <mask> <mask>", top_k=2) self.assertEqual( nested_simplify(outputs, decimals=6), [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 
16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ], ) @require_torch_accelerator def test_fp16_casting(self): pipe = pipeline( "fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=torch_device, framework="pt", ) pipe.model.half() response = pipe("Paris is the [MASK] of France.") self.assertIsInstance(response, list) @slow @require_torch def test_large_model_pt(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt") self.run_large_test(unmasker) @slow @require_tf def test_large_model_tf(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf") self.run_large_test(unmasker) def run_large_test(self, unmasker): outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs), [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ], ) outputs = unmasker( "My name is <mask>" + "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100, tokenizer_kwargs={"truncation": True}, ) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.2e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) @require_torch def test_model_no_pad_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) @require_tf def test_model_no_pad_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) examples = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def run_pipeline_test(self, fill_masker, examples): tokenizer = fill_masker.tokenizer model = fill_masker.model outputs = fill_masker( f"This is a {tokenizer.mask_token}", ) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": 
ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], ) with self.assertRaises(ValueError): fill_masker([None]) with self.assertRaises(PipelineException): fill_masker("This is") self.run_test_top_k(model, tokenizer) self.run_test_targets(model, tokenizer) self.run_test_top_k_targets(model, tokenizer) self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer) self.fill_mask_with_multiple_masks(model, tokenizer) def run_test_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:2] fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) tokens = [top_mask["token_str"] for top_mask in outputs] scores = [top_mask["score"] for top_mask in outputs] if set(tokens) == set(targets): 
unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens) target_scores = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(scores), nested_simplify(target_scores)) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[]) if "" not in tokenizer.get_vocab(): with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""]) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="") def run_test_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2) self.assertEqual( outputs2, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def run_test_top_k_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) targets = sorted(vocab.keys())[:3] outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets) targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)] if set(targets2).issubset(targets): outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:3] targets = [targets[0], targets[1], targets[0], targets[2], targets[1]] outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10) self.assertEqual(len(outputs), 3) def fill_mask_with_multiple_masks(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2 ) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], )
2021 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license accepts url pil image lists rgba la l assert that the pipeline can be initialized with a feature extractor that is not in any mapping perceiver is not tested by run_pipeline_test properly that is because the type of feature_extractor and model preprocessor need to be kept in sync which is not the case in the current design
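A hedged sketch of the input formats the comments above refer to, reusing the tiny checkpoint and fixture path from these tests: the image-classification pipeline accepts a URL, a local path or a PIL.Image (RGBA/LA/L inputs are converted internally), and a list of any of these returns one list of {label, score} dicts per image.

from PIL import Image
from transformers import pipeline

classifier = pipeline("image-classification", model="hf-internal-testing/tiny-random-vit")

# URL, local path and PIL.Image inputs are all accepted.
print(classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2))
print(classifier(Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), top_k=2))

# A list of inputs returns one list of {"label": ..., "score": ...} dicts per image.
print(classifier(["http://images.cocodataset.org/val2017/000000039769.jpg"] * 2, top_k=2))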
import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", dataset[0]["file"], dataset[1]["file"], dataset[2]["file"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, 
decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @slow @require_torch def test_perceiver(self): image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], )
2021 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license instance segmentation maskformer and detr have a slot for null class and can output nothing even with a low threshold xxx pil image implements __eq__ which bypasses any so we inverse the comparison to make it work rgba la l we need to test batch_size with images with the same size detr doesn t normalize the size of the images meaning we can have 800x800 or 800x1200 meaning we cannot batch simply we simply bail on this 5 times the same image so the output shape is predictable the default task is image classification we need to override this model does not support neither instance nor panoptic we should error out shortening by hashing this is extremely brittle and those values are made specific for the ci this must be surprising to the reader the panoptic returns only label_215 and this returns 3 labels page links to visualize actual links to get files convert masks to numpy array with un trained tiny random models the output logits tensor is very likely to contain many values close to each other which cause argmax to give quite different results when running the test on 2 environments we use a lower threshold 0 9 here to avoid flakiness shortening by hashing shortening by hashing shortening by hashing shortening by hashing shortening by hashing shortening by hashing different task shortening by hashing different task shortening by hashing
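To make the "shortening by hashing" remarks above concrete, a minimal sketch (assuming the facebook/detr-resnet-50-panoptic checkpoint and the COCO image URL used in the slow tests below are available; download and inference are slow): each result carries a PIL mask, and hashing the mask bytes plus counting white pixels gives the short fingerprints the assertions compare.

import numpy as np
from huggingface_hub.utils import insecure_hashlib
from transformers import pipeline

segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
outputs = segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5)

for o in outputs:
    mask = np.array(o["mask"])  # PIL mask converted to a numpy array
    digest = insecure_hashlib.md5(o["mask"].tobytes()).hexdigest()[:10]  # "shortening by hashing"
    white_pixels = int((mask == 255).sum())
    print(o["label"], o["score"], digest, white_pixels, mask.shape)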
import tempfile import unittest from typing import Dict import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") outputs = image_segmenter(dataset[0]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) outputs = image_segmenter(dataset[1]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) outputs = image_segmenter(dataset[2]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): batch_size = 1 else: batch_size = 2 batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", 
"./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" pipe = pipeline(task="image-segmentation", model=model_id) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), "Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) output = 
image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) 
self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=threshold) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": 
"a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=0.99) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) outputs = image_segmenter(file, threshold=0.99, subtask="instance") for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) outputs = image_segmenter(file, subtask="semantic") for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
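A minimal usage sketch of the image-to-image (super-resolution) pipeline covered by the tests above, reusing the Swin2SR checkpoint and the local COCO fixture; the output filename is illustrative:

from PIL import Image

from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Passing a list of inputs returns a list of upscaled PIL.Image objects,
# roughly 2x the input resolution for this x2 checkpoint.
outputs = upscaler([image])
outputs[0].save("upscaled.png")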
2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: is_torch_greater_or_equal_than_1_11 = False if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, processor): pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ 
[{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_tf def test_large_model_tf(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], )
2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO: implement me (Arthur). Shortening by hashing. fmt: off / fmt: on. Shortening by hashing.
import unittest from typing import Dict import numpy as np from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"hash": hashimage(mask), "shape": shape} @is_pipeline_test @require_vision @require_torch class MaskGenerationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) tf_model_mapping = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, mask_generator, examples): pass @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_small_model_pt(self): image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge") outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256) new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': 
{'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871} ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/sam-vit-huge" image_segmenter = pipeline("mask-generation", model=model_id) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 ) new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053}, ], )
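A minimal usage sketch of the mask-generation (SAM) pipeline mirrored by the tests above, reusing the facebook/sam-vit-huge checkpoint; points_per_batch trades memory for speed, and the exact masks and scores depend on the input image:

import numpy as np

from transformers import pipeline

mask_generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = mask_generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
)
# The pipeline returns parallel lists of binary masks and their predicted IoU scores.
for mask, score in zip(outputs["masks"], outputs["scores"]):
    print(np.array(mask).shape, float(score))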
2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. RGBA, LA, L.
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = ObjectDetectionPipeline(model=model, image_processor=processor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", dataset[0]["file"], dataset[1]["file"], dataset[2]["file"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = 
AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": 
"remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Some models (Switch Transformers, LED, T5, LongT5, etc.) can handle long sequences (positional embeddings up to a fixed maximum size, otherwise clamping the values). Too long and an exception is expected. For TF models, if the weights are initialized in GPU context, we won't get the expected index error from the embedding layer.
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, SummarizationPipeline, TFPreTrainedModel, pipeline, ) from transformers.testing_utils import get_gpu_count, is_pipeline_test, require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer) return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"] def run_pipeline_test(self, summarizer, _): model = summarizer.model outputs = summarizer("(CNN)The Palestinian Authority officially became") self.assertEqual(outputs, [{"summary_text": ANY(str)}]) outputs = summarizer( "(CNN)The Palestinian Authority officially became ", num_beams=2, min_length=2, max_length=5, ) self.assertEqual(outputs, [{"summary_text": ANY(str)}]) model_can_handle_longer_seq = [ "SwitchTransformersConfig", "T5Config", "LongT5Config", "LEDConfig", "PegasusXConfig", "FSMTConfig", "M2M100Config", "ProphetNetConfig", ] if model.config.__class__.__name__ not in model_can_handle_longer_seq: if not ( isinstance(model, TFPreTrainedModel) and get_gpu_count() > 0 and len(summarizer.model.trainable_weights) > 0 ): with self.assertRaises(Exception): outputs = summarizer("This " * 1000) outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST) @require_torch def test_small_model_pt(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_tf def test_small_model_tf(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_torch @slow def test_integration_torch_summarization(self): summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. 
But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) expected_cnn_summary = ( " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives" " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States" " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move," " says governments seeking to penalize Palestine should end pressure ." ) result = summarizer(cnn_article) self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
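A minimal usage sketch of the summarization pipeline exercised above; the checkpoint is left to the library default (as in test_integration_torch_summarization), num_beams/min_length/max_length are the same knobs the tests pass through (with illustrative values here), and the article string is a truncation of the CNN article used above:

from transformers import pipeline

summarizer = pipeline("summarization")  # resolves to the library's default summarization checkpoint
article = "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court ..."
outputs = summarizer(article, num_beams=2, min_length=5, max_length=60)
print(outputs[0]["summary_text"])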
2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Putting it there for consistency, but TQA do not have fast tokenizers, which are needed to generate automatic tests. # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
import unittest from transformers import ( MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, AutoModelForTableQuestionAnswering, AutoTokenizer, TableQuestionAnsweringPipeline, TFAutoModelForTableQuestionAnswering, is_torch_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_pandas, require_tensorflow_probability, require_tf, require_torch, slow, ) if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False @is_pipeline_test class TQAPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING @require_tensorflow_probability @require_pandas @require_tf @require_torch def test_small_model_tf(self): model_id = "lysandre/tiny-tapas-random-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ 
"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_small_model_pt(self): model_id = "lysandre/tiny-tapas-random-wtq" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with 
self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_slow_tokenizer_sqa_pt(self): model_id = "lysandre/tiny-tapas-random-sqa" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the 
number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_tf @require_tensorflow_probability @require_pandas @require_torch def test_slow_tokenizer_sqa_tf(self): model_id = "lysandre/tiny-tapas-random-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many 
movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def test_integration_wtq_pt(self): table_querier = pipeline("table-question-answering") data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { 
"answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_wtq_tf(self): model_id = "google/tapas-base-finetuned-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer) data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def test_integration_sqa_pt(self): table_querier = pipeline( "table-question-answering", model="google/tapas-base-finetuned-sqa", tokenizer="google/tapas-base-finetuned-sqa", ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_sqa_tf(self): model_id = "google/tapas-base-finetuned-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline( "table-question-answering", model=model, tokenizer=tokenizer, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], 
"Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_torch def test_large_model_pt_tapex(self): model_id = "microsoft/tapex-large-finetuned-wtq" table_querier = pipeline( "table-question-answering", model=model_id, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = [ "How many movies has George Clooney played in?", "How old is Mr Clooney ?", "What's the date of birth of Leonardo ?", ] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": " 69"}, {"answer": " 59"}, {"answer": " 10 june 1996"}, ] self.assertListEqual(results, expected_results)
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0. These are encoder-decoder models: they don't just append to the incoming string. do_sample=False is necessary for reproducibility.
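A minimal sketch of the behaviour described above, using the same tiny checkpoint as the tests below. Because the model is an encoder-decoder, the generated text is not a continuation of the prompt, and do_sample=False keeps the output deterministic across runs.

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")

# Deterministic decoding; for this untrained tiny model the tests below expect
# [{"generated_text": ""}].
out = generator("Something there", do_sample=False)
print(out)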
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, Text2TextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer) return generator, ["Something to write", "Something else"] def run_pipeline_test(self, generator, _): outputs = generator("Something there") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertFalse(outputs[0]["generated_text"].startswith("Something there")) outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) outputs = generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): generator(4) @require_torch def test_small_model_pt(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}]) num_return_sequences = 3 outputs = generator( "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, ) target_outputs = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(outputs, target_outputs) outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ) generator.tokenizer.pad_token_id = generator.model.config.eos_token_id generator.tokenizer.pad_token = "<pad>" outputs = generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ], ) @require_tf def test_small_model_tf(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}])
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0. These 2 model types require different inputs than those of the usual text models. Legacy behavior. Small inputs because BartTokenizer tiny has maximum position embeddings = 22. Forcing to get all results with top_k=None. This is not the legacy format. This might be used as a text pair, but the tokenizer + pipeline interaction makes it hard to understand that it's not using the pair properly (https://github.com/huggingface/transformers/issues/17305), so we disabled this usage instead, as it was outputting wrong outputs. This used to be valid for doing text pairs; we're keeping it working because of backward compatibility.
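A minimal sketch of the input formats discussed above, using the same tiny test checkpoint as the tests below. top_k=None returns a score for every label instead of only the best one, and the {"text": ..., "text_pair": ...} dict is the supported way to classify a text pair (the old nested-list format is kept only for backward compatibility).

from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")

# All labels with their scores rather than just the top one.
print(classifier("This is great !", top_k=None))

# Text-pair input as a dict; returns a single {"label": ..., "score": ...} dict.
print(classifier({"text": "HuggingFace is in", "text_pair": "Paris is in France"}))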
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), 
[{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0. For now, only test text_to_waveform and not text_to_spectrogram. Test two examples side by side. Test batching. Using do_sample=False to force deterministic output. Test other generation strategy. Test using a speaker embedding; at the moment this must be done to stay coherent with BarkProcessor. history_prompt is a torch tensor passed as a forward_param; if generation is successful, it means that it was passed to the right device. Use VITS, a forward model, for reproducibility. Assert error if a generate parameter is passed. Assert error if generate_kwargs are used with forward-only models. Use MusicGen, a generative model, for reproducibility. Make sure generate kwargs get priority over forward params, for reproducibility.
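A minimal sketch of the forward_params usage exercised in the tests below, with the same MusicGen checkpoint. The output is a dict with an "audio" numpy array and a "sampling_rate"; for generative models, options such as do_sample and max_new_tokens are passed through forward_params (generate_kwargs, when given, take priority over them).

from transformers import pipeline

music_generator = pipeline("text-to-audio", model="facebook/musicgen-small")

out = music_generator(
    "This is a test",
    forward_params={"do_sample": False, "max_new_tokens": 250},
)
# The tests below expect a 32000 Hz sampling rate for this model.
print(out["sampling_rate"], out["audio"].shape)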
import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), 
"sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def test_generative_model_kwargs(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, tokenizer=tokenizer) return speech_generator, ["This is a test", "Another test"] def run_pipeline_test(self, speech_generator, _): outputs = speech_generator("This is a test") self.assertEqual(ANY(np.ndarray), outputs["audio"]) forward_params = ( {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {} ) outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0. We don't provide a default for this pair, but we do for this one. Missing src_lang / tgt_lang. src_lang and tgt_lang can be defined at pipeline call time.
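A minimal sketch of the call-time language selection described above, assuming the same multilingual MBart checkpoint used in the slow test below can also be loaded by name. src_lang and tgt_lang may be given either when building the pipeline or on each call; omitting them for a multilingual model raises a ValueError.

from transformers import pipeline

translator = pipeline("translation", model="facebook/mbart-large-50-many-to-many-mmt")

# Languages chosen at call time, as in the multilingual translation test below.
print(translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR"))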
import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang) else: translator = TranslationPipeline(model=model, tokenizer=tokenizer) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = 
pipeline(task="translation", model=model, tokenizer=tokenizer) with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0.
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class VideoClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): example_video_filepath = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def run_pipeline_test(self, video_classifier, examples): for example in examples: outputs = video_classifier(example) self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" small_feature_extractor = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) video_classifier = pipeline( "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 ) video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset") outputs = video_classifier(video_file_path, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ) outputs = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ], ) @require_tf def test_small_model_tf(self): pass
Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0.
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline(self, model, tokenizer, processor): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_tf @unittest.skip("Visual question answering not implemented in TF") def test_small_model_tf(self): pass
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0. These 2 model types require different inputs than those of the usual text models. No kwarg (https://github.com/huggingface/transformers/issues/13846). There was a regression in 4.10 for this; adding a test so we don't make the mistake again (https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499).
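A minimal sketch of the zero-shot usage exercised below, with the same large NLI checkpoint as the slow test; the hypothesis_template string here is illustrative, not the pipeline default. candidate_labels can be a list or a comma-separated string, hypothesis_template must contain a "{}" slot, and multi_label=True scores each label independently instead of normalizing across labels.

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")

out = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# Labels come back sorted by score, as in the expected results below.
print(out["labels"], out["scores"])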
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class ZeroShotClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): classifier = ZeroShotClassificationPipeline( model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def run_pipeline_test(self, classifier, _): outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics") self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", ["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health") self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"]) self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" ) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier(["I am happy"], ["positive", "negative"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(1) ], ) outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(2) ], ) with self.assertRaises(ValueError): classifier("", candidate_labels="politics") with self.assertRaises(TypeError): classifier(None, candidate_labels="politics") with self.assertRaises(ValueError): classifier("Who are you voting for in 2020?", candidate_labels="") with self.assertRaises(TypeError): classifier("Who are you voting for in 2020?", candidate_labels=None) with self.assertRaises(ValueError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", ) with self.assertRaises(AttributeError): classifier( "Who are you voting for in 2020?", 
candidate_labels="politics", hypothesis_template=None, ) self.run_entailment_id(classifier) def run_entailment_id(self, zero_shot_classifier: Pipeline): config = zero_shot_classifier.model.config original_label2id = config.label2id original_entailment = zero_shot_classifier.entailment_id config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id, -1) config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id, 2) zero_shot_classifier.model.config.label2id = original_label2id self.assertEqual(original_entailment, zero_shot_classifier.entailment_id) @require_torch def test_truncation(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) zero_shot_classifier( "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"] ) @require_torch def test_small_model_pt(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @require_tf def test_small_model_tf(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @slow @require_torch def test_large_model_pt(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. 
On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, ) @slow @require_tf def test_large_model_tf(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. 
We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, )
Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0. Deactivating auto tests since we don't have a good model_for_xx mapping, and only CLAP would be there for now: model_mapping = {ClapConfig: ClapModel}. "This is an audio of a dog."
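A minimal sketch of the zero-shot audio call tested below, with the same CLAP checkpoint and the same ESC-50 sample; the candidate labels here are my own free-form strings, not the exact ones asserted in the test.

from datasets import load_dataset
from transformers import pipeline

audio_classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")

# Last audio sample of the ESC-50 train split, as in the tests below.
audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
print(audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"]))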
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class ZeroShotAudioClassificationPipelineTests(unittest.TestCase): @require_torch def test_small_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" ) dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], ) @unittest.skip("No models are available in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", ) dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ) output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) output = audio_classifier( [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) @unittest.skip("No models are available in TF") def test_large_model_tf(self): pass
Copyright 2021 The HuggingFace Team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind.

Auto tests are deactivated since we don't have a good MODEL_FOR_XX mapping and only CLIP would be there for now; model_mapping = {CLIPConfig: CLIPModel}. get_test_pipeline(model, tokenizer, processor) skips the test with "No tokenizer available" when the tokenizer is None (a side effect of there being no fast tokenizer class for these models, though the slow tokenizer tests should still run as they are quite small); otherwise it builds a ZeroShotImageClassificationPipeline from the model, tokenizer and feature extractor and returns it together with tests/fixtures/tests_samples/COCO/000000039769.png as the example inputs. run_pipeline_test(pipe, examples) runs the pipeline on that image with candidate_labels=["a", "b"], then checks batching with [image] * 3 and batch_size=2. The floating scores are so close that we enter floating-point error approximation and the order is not guaranteed across Python and torch versions; pipeline outputs are supposed to be deterministic, so we could in theory assert the real values "a", "b", "c" instead of ANY(str), but with batching the order is not guaranteed anymore. The COCO fixture image is described as "This is an image of 2 cats with remotes and no planes".
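Because of the floating ties described above, the assertions compare labels against ANY(str) imported from .test_pipelines_common rather than concrete strings. A minimal, hedged sketch of such a matcher (the real helper lives in tests/test_pipelines_common.py and may differ):

# hedged sketch of an ANY-style matcher: equal to any instance of the given types
class AnySketch:
    def __init__(self, *types):
        self.types = types

    def __eq__(self, other):
        return isinstance(other, self.types)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return f"ANY({', '.join(t.__name__ for t in self.types)})"

# {"score": 0.333, "label": "b"} == {"score": 0.333, "label": AnySketch(str)}  -> True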
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): @require_torch def test_small_model_pt(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", 
"remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
Copyright 2021 The HuggingFace Team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind.
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0], threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": 
"remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
Copyright 2023 The HuggingFace Team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind.

Config tests: a simple test checks that passing a wrong backend raises an error, while a plain AwqConfig and both the "GEMM" and "gemm" version spellings work fine; "llm-awq" is rejected because it does not work on a T4. A simple test checks that converting a config with to_dict() gives a dict matching the config object, and another checks that from_dict() turns a dict back into an equivalent config object. Model tests: the quantized model is set up only once for all tests in the class (setUpClass); simple tests then check that the quantized model has been converted properly (also when trying modules_to_not_convert), that it works properly (including with bf16 and without a device map), that it still works properly after being saved and loaded, that passing a quantization config to quantize a model raises an error, and that the quantized model works properly on multiple GPUs.
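As a quick, hedged illustration of the config round trip exercised below (a usage sketch, not part of the test file):

# hedged sketch: an AwqConfig should survive a to_dict()/from_dict() round trip
from transformers import AwqConfig

cfg = AwqConfig(bits=4, zero_point=True, version="gemm")
as_dict = cfg.to_dict()
rebuilt = AwqConfig.from_dict(as_dict)
assert rebuilt.bits == cfg.bits and rebuilt.zero_point == cfg.zero_point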
import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_auto_awq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AwqConfigTest(unittest.TestCase): def test_wrong_backend(self): _ = AwqConfig(bits=4) with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") _ = AwqConfig(bits=4, version="GEMM") _ = AwqConfig(bits=4, version="gemm") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="unexisting-backend") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq") def test_to_dict(self): quantization_config = AwqConfig(bits=4) config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} quantization_config = AwqConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) self.assertEqual(dict["zero_point"], quantization_config.zero_point) self.assertEqual(dict["backend"], quantization_config.backend) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. 
I am a junior and I am majoring in Exercise and Sport Science with a" device_map = "cuda" @classmethod def setUpClass(cls): cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def test_quantized_model_conversion(self): from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear) def test_quantized_model(self): input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_bf16(self): input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) def test_quantized_model_no_device_map(self): input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_quantization(self): quantization_config = AwqConfig(bits=4) with self.assertRaises(ValueError) as context: _ = AutoModelForCausalLM.from_pretrained( self.dummy_transformers_model_name, quantization_config=quantization_config ) self.assertEqual( str(context.exception), "You cannot pass an `AwqConfig` when loading a model as you can only use AWQ models for inference. 
To quantize transformers models with AWQ algorithm, please refer to our quantization docs: https://huggingface.co/docs/transformers/main_classes/quantization ", ) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1, 2, 3}) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Copyright 2022 The HuggingFace Team Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind.

LoRALayer wraps a linear layer with a LoRA-like adapter and is used for testing purposes only: the adapter is a pair of bias-free linear layers whose first weight is initialised with small_std = (2 / (5 * min(in_features, out_features))) ** 0.5 and whose second weight is zero-initialised, and forward() returns module(input) + adapter(input). Base4bitTest keeps the constants inside the class and loads models inside setUp(); we need to test on relatively large models (around 1B parameters), otherwise the quantization may not work as expected, so only bigscience/bloom-1b7 is used. EXPECTED_RELATIVE_DIFFERENCE was obtained on an RTX Titan, so the number might change slightly. Every tearDown() frees the GPU memory and cache to avoid unexpected behaviors (see https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27).

The tests check that: the number of returned parameters matches the fp16 model (see https://github.com/huggingface/transformers/issues/25978); the quantization config is correctly serialized and deserialized; the model conversion has been done correctly, by checking the memory footprint of the converted model and the class type of its linear layers (4-bit parameters are packed in uint8 variables); the model successfully stores the original dtype; 4-bit RWKV inference works as expected; generation quality matches the expected output (given that we operate on small numbers and the testing model is relatively small, we might not get the same output across GPUs, so we generate only a few tokens, 5-10, and check them); loading the model with a config is equivalent; trying to save a model after converting it is not supported (the test expects NotImplementedError); loading with both a config and load_in_4bit raises an error; trying to cast or assign a device to a converted model raises an error (with a str, a dtype, a device, float() and half()), while nothing is broken for the fp16 model; mixing 4-bit and fp32 weights with keep_in_fp32_modules works correctly (flan-t5-small uses T5DenseGatedActDense whereas t5-small uses T5DenseReluDense, so both cases are tested, including a past decoder bug that is now fixed); for the different model types (base, sequence classification, causal LM, seq2seq) the last modules are kept in their native class, so heads stay nn.Parameter; mixed 4-bit is compatible with pipeline() from transformers (pipelines are used for inference speed benchmarking, so this feature must not break anything there); multi-GPU loading works (the model has about 2GB of weights in total, so 3GB should suffice; the device map is checked and an inference pass is run); and training works: step 1 freeze all parameters (train adapters later) and cast the small parameters, e.g. layernorm, to fp32 for stability; step 2 add adapters; step 3 run a dummy batch; step 4 check that the adapter gradients are not None.
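A hedged sketch of the basic 4-bit loading pattern these tests revolve around (checkpoint and prompt taken from the constants above; the exact generated text is not guaranteed):

# hedged usage sketch, not part of the test file: load bloom-1b7 in 4-bit and generate
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
model_4bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=bnb_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
out = model_4bit.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(out[0], skip_special_tokens=True))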
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Base4bitTest(unittest.TestCase): model_name = "bigscience/bloom-1b7" EXPECTED_RELATIVE_DIFFERENCE = ( 2.109659552692574 ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University") MAX_NEW_TOKENS = 10 def setUp(self): self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class Bnb4BitTest(Base4bitTest): def setUp(self): super().setUp() self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") def tearDown(self): r del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_quantization_num_parameters(self): r num_params_4bit = self.model_4bit.num_parameters() num_params_fp16 = self.model_fp16.num_parameters() self.assertEqual(num_params_4bit, num_params_fp16) def test_quantization_config_json_serialization(self): r config = self.model_4bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_memory_footprint(self): r from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE) linear = get_some_linear_layer(self.model_4bit) self.assertTrue(linear.weight.__class__ == Params4bit) def test_original_dtype(self): r self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16) def test_linear_are_4bit(self): r from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + 
T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.uint8) def test_rwkv_4bit(self): r model_id = "RWKV/rwkv-4-169m-pile" quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) tok = AutoTokenizer.from_pretrained(model_id) text = "Hello my name is" input_ids = tok.encode(text, return_tensors="pt").to(0) _ = model.generate(input_ids, max_new_tokens=30) def test_generate_quality(self): r encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r bnb_config = BitsAndBytesConfig() bnb_config.load_in_4bit = True model_4bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_4bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_on_save_pretrained(self): r with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname: self.model_4bit.save_pretrained(tmpdirname) def test_raise_if_config_and_load_in_4bit(self): r bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", ) def test_device_and_dtype_assignment(self): r with self.assertRaises(ValueError): self.model_4bit.to("cpu") with self.assertRaises(ValueError): self.model_4bit.to(torch.float16) with self.assertRaises(ValueError): self.model_4bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): self.model_4bit.float() with self.assertRaises(ValueError): self.model_4bit.half() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) _ = self.model_fp16.to("cpu") _ = self.model_fp16.half() _ = self.model_fp16.float() def test_fp32_4bit_conversion(self): r model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Bnb4BitT5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = 
model.generate(**encoded_input) model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r import bitsandbytes as bnb from transformers import T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class Classes4BitModelTest(Base4bitTest): def setUp(self): super().setUp() self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_4bit=True, device_map="auto" ) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_4bit=True, device_map="auto" ) def tearDown(self): r del self.base_model del self.sequence_model del self.model_4bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r from bitsandbytes.nn import Params4bit self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit) self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class Pipeline4BitTest(Base4bitTest): def setUp(self): super().setUp() def tearDown(self): r del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, ) pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class Bnb4bitTestMultiGpu(Base4bitTest): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_4bit=True, device_map="balanced" ) self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) class Bnb4BitTestTraining(Base4bitTest): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return model = AutoModelForCausalLM.from_pretrained(self.model_name, 
load_in_4bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False if param.ndim == 1: param.data = param.data.to(torch.float32) for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class Bnb4BitGPT2Test(Bnb4BitTest): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
Copyright 2022 The HuggingFace Team Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind.

As in the 4-bit suite, LoRALayer wraps a linear layer with a LoRA-like adapter for testing purposes only. BaseMixedInt8Test keeps the constants inside the class and loads models inside setUp(); we need to test on relatively large models (around 1B parameters), otherwise the quantization may not work as expected, so only bigscience/bloom-1b7 is used. EXPECTED_RELATIVE_DIFFERENCE = 1.540025 was obtained on a Quadro RTX 8000, so the number might change slightly. Every tearDown() frees the GPU memory and cache to avoid unexpected behaviors (see https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27).

The tests check that: get_keys_to_not_convert works with trust_remote_code models (mosaicml/mpt-7b at a pinned revision should return transformer.wte) and in general (the order of the keys does not matter, so they are sorted before comparing, same for the other tests); the quantization config is correctly serialized and deserialized; the model successfully stores the original dtype; the model conversion has been done correctly, by checking the memory footprint of the converted model and the class type of its linear layers; llm_int8_skip_modules works as expected (the skipped classifier modules of roberta-large-mnli stay regular nn.Linear instead of int8); generation quality matches the expected output (small numbers, a relatively small model, so only 5-10 tokens are generated and checked); loading the model with a config is equivalent; loading with both a config and load_in_8bit raises an error; trying to cast or assign a device to an 8-bit model raises an error (with a str, a dtype, a device, float() and half()), while nothing is broken for the fp16 model; mixing int8 and fp32 weights with keep_in_fp32_modules works correctly; a model can be serialized in 8-bit (with and without safetensors, and sharded with max_shard_size="200MB"), checking that the quantization_config is present in the saved config, that the reloaded linear weights are Int8Params with an SCB attribute, and that generation still matches; loading an 8-bit model from the Hub works as expected; dispatching a model on cpu/gpu (and disk) works with a custom device_map and llm_int8_enable_fp32_cpu_offload, checking the resulting hf_device_map and inference correctness; for the different model types the last modules are kept in their native class (the base model's last linear should be an 8-bit linear while other heads stay nn.Parameter); mixed int8 is compatible with pipeline() from transformers; multi-GPU loading with device_map="balanced" works; training works (freeze all parameters, cast the small parameters such as layernorm to fp32 for stability, add adapters, run a dummy batch, and check that the adapter gradients are not None); and a GPT-2 XL variant of the suite reruns the checks with its own expected relative difference and output. The keep_in_fp32 tests note that flan-t5-small uses T5DenseGatedActDense whereas t5-small uses T5DenseReluDense, so both cases are tested (including a past decoder bug that is now fixed).
t5densereludense we need to test both cases test with t5 small test with flan t5 small test whether it is possible to mix both int8 and fp32 weights when using keep_in_fp32_modules correctly flan t5 small uses t5densegatedactdense whereas t5 small uses t5densereludense we need to test both cases test with t5 small there was a bug with decoders this test checks that it is fixed test with flan t5 small test whether it is possible to mix both int8 and fp32 weights when using keep_in_fp32_modules correctly on a serialized model flan t5 small uses t5densegatedactdense whereas t5 small uses t5densereludense we need to test both cases test with t5 small there was a bug with decoders this test checks that it is fixed test with flan t5 small model_name different types of model sequence classification model causallm model seq2seq model teardown function needs to be called at the end of each test to free the gpu memory and cache also to avoid unexpected behaviors please see https discuss pytorch org t how can we release gpu memory cache 14530 27 a simple test to check if the last modules for some classes automodelforcausallm or sequenceclassification are kept in their native class last param of a base model should be a linear8bit module other heads should be nn parameter teardown function needs to be called at the end of each test to free the gpu memory and cache also to avoid unexpected behaviors please see https discuss pytorch org t how can we release gpu memory cache 14530 27 the aim of this test is to verify that the mixed int8 is compatible with pipeline from transformers since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline self _clear_cuda_cache real second forward pass this tests that the model has been loaded and can be used correctly on a multi gpu setup let s just try to load a model on 2 gpus and see if it works the model we test has 2gb of total 3gb should suffice check correct device map check that inference pass works on the model second real batch check that inference pass works on the model check the exactness of the results get the generation a test to check is dispatching a model on cpu gpu works correctly using a random device_map check that the model has been correctly set on device 0 1 and cpu a test to check is dispatching a model on cpu gpu works correctly using a custom device_map this time the device map is more organized than the test above and uses the abstraction transformer h to encapsulate all the decoder layers load model check that the model has been correctly set on device 0 1 and cpu a test to check is dispatching a model on cpu gpu works correctly using a custom device_map this time we also add disk on the device_map load model check that the model has been correctly set on device 0 1 and cpu a test to check is dispatching a model on cpu gpu works correctly using a custom device_map this time we also add disk on the device_map using the kwargs directly instead of the quantization config load model check that the model has been correctly set on device 0 1 and cpu step 1 freeze all parameters freeze the model train adapters later cast the small parameters e g layernorm to fp32 for stability step 2 add adapters step 3 dummy batch step 4 check if the gradient is not none test whether loading a 8bit model from the hub works as expected generate
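To bridge the description above and the test module that follows, here is a minimal sketch of the basic pattern the suite exercises: loading a causal LM in 8-bit through a BitsAndBytesConfig and generating a few tokens. This snippet is not part of the test file; it assumes a CUDA GPU with the bitsandbytes and accelerate packages installed, and simply reuses the checkpoint and prompt the tests use.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Same checkpoint and prompt as BaseMixedInt8Test below.
model_name = "bigscience/bloom-1b7"
bnb_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))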
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_accelerate_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class BaseMixedInt8Test(unittest.TestCase): model_name = "bigscience/bloom-1b7" EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 ) input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_get_keys_to_not_convert_trust_remote_code(self): r from accelerate import init_empty_weights from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) def test_get_keys_to_not_convert(self): r from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "transformer.wte"].sort()) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) 
self.assertEqual( get_keys_to_not_convert(model).sort(), ["language_model.lm_head", "language_model.model.decoder.embed_tokens"].sort(), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "model.decoder.embed_tokens"].sort()) model_id = "roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder"].sort(), ) def test_quantization_config_json_serialization(self): r config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r import bitsandbytes as bnb quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8) def test_generate_quality(self): r encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_generate_quality_config(self): r bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), 
max_new_tokens=10 ) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_config_and_load_in_8bit(self): r bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r with self.assertRaises(ValueError): self.model_8bit.to("cpu") with self.assertRaises(ValueError): self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): self.model_8bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): self.model_8bit.float() with self.assertRaises(ValueError): self.model_8bit.half() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) _ = self.model_fp16.to("cpu") _ = self.model_fp16.half() _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_regression(self): r from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_sharded(self): r from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) encoded_input = 
self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_from_pretrained(self): r from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r import bitsandbytes as bnb from transformers import T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r import bitsandbytes as bnb from transformers import T5ForConditionalGeneration model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): 
super().setUp() self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r from bitsandbytes.nn import Int8Params self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) pipeline_output = self.pipe(self.input_text) self.assertEqual(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertEqual(output_text, self.EXPECTED_OUTPUT) def test_cpu_gpu_loading_random_device_map(self): r device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, 
quantization_config=bnb_config, ) self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) with tempfile.TemporaryDirectory() as tmpdirname: model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False if param.ndim == 1: param.data = param.data.to(torch.float32) for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class MixedInt8GPT2Test(MixedInt8Test): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUT = "Hello my name is John Doe, and I'm a big fan of" def test_int8_from_pretrained(self): r from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) 
encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
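The CPU/GPU/disk dispatch tests above all rely on the same idea: a custom device_map combined with llm_int8_enable_fp32_cpu_offload keeps the offloaded modules in fp32 while the GPU modules are quantized to int8. Below is a hedged sketch of that usage, not taken from the test file; the module names are specific to the BLOOM architecture and the map is a simplified variant of the ones used in MixedInt8TestCpuGpu.

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Keep the embeddings, final layer norm and LM head on CPU (in fp32),
# put all decoder blocks on GPU 0 (quantized to int8).
device_map = {
    "transformer.word_embeddings": "cpu",
    "transformer.word_embeddings_layernorm": "cpu",
    "lm_head": "cpu",
    "transformer.h": 0,
    "transformer.ln_f": 0,
}

bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7",
    device_map=device_map,
    quantization_config=bnb_config,
)
print(model.hf_device_map)  # shows where each module was placed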
Apache License, Version 2.0 header (coding: utf-8, copyright 2023 The HuggingFace Team). The docstrings and comments of this GPTQ test module describe the following:

- GPTQConfig validation (bits, dataset, damp_percent), to_dict/from_dict round trips, and conversion to and from the optimum GPTQQuantizer configuration.
- setUpClass is called only once for all tests in the class and sets up the quantized model. The expected relative memory difference seems a little small considering that 4-bit quantization is used, but the model is small and the embeddings are not quantized.
- A simple memory-footprint check on the converted model, a check that the model successfully stores the original dtype, and a check on the class type of the quantized linear layers (dynamically_import_QuantLinear with the suite's bits/group_size/desc_act/exllama settings).
- Trying to cast or assign a dtype to a model after quantization must throw an error; moving it to a device with .to(0) should still work when no device_map was used. Other models must be cast correctly.
- Generation quality: given that the testing model is relatively small, the output might differ across GPUs, so only a few tokens (5-10) are generated and compared against a set of expected outputs.
- Serialization: saving and reloading the quantized weights must work, including with big-model inference (accelerate) and when reloading with another config file; the model needs to be put directly on the GPU, otherwise the exllama kernel cannot be initialized.
- GPTQ models with the exllama kernel and desc_act=True (also known as act-order); more information on those arguments at https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig. max_input_length modifies the maximum input length of a model running with the exllama backend, so over-long prompts must raise while shorter ones generate normally.
- The same checks with the exllamav2 kernel, plus device_map variants; the CPU-offload variant is skipped because it fails when run all together.
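Before the GPTQ test module itself, here is a minimal sketch of the quantization call its setUpClass performs: quantizing a checkpoint on the fly with a GPTQConfig that carries the calibration dataset and tokenizer. The values simply mirror the class attributes of GPTQTest below; a real run would normally use a much larger calibration set. This requires the optimum and auto-gptq packages and a CUDA GPU.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_name = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

gptq_config = GPTQConfig(
    bits=4,
    group_size=128,
    desc_act=False,
    # Tiny calibration set, as in the test; use a real dataset (e.g. "c4") in practice.
    dataset=["auto-gptq is an easy-to-use model quantization library with user-friendly apis."],
    tokenizer=tokenizer,
)

quantized_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=gptq_config,
)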
import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None @classmethod def setUpClass(cls): cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): self.quantized_model.to(torch.float16) def test_original_dtype(self): r self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.assertEqual(self.quantized_model.config.quantization_config.use_exllama, False) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, 
quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.use_exllama, True) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" * 500 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) 
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): self.check_inference_correctness(self.quantized_model) @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
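The exllama/exllamav2 test classes above do not quantize anything themselves: they load an already-quantized checkpoint and only choose the kernel through the config passed at load time. A hedged sketch of that loading path, reusing the same Hub checkpoint and revision as the tests:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "hf-internal-testing/Llama-2-7B-GPTQ"
quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2})

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision="gptq-4bit-128g-actorder_True",
    torch_dtype=torch.float16,
    device_map={"": 0},  # the exllama kernels need the model directly on the GPU
    quantization_config=quantization_config,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))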
Apache License, Version 2.0 header (copyright 2023 The HuggingFace Team); the imports after the sys.path manipulation carry a noqa: E402 marker. The comments of this check_docstrings test module describe the cases covered: a standard docstring with a default; a standard docstring with a default where *optional* is written without the stars; when the passed default is None, the value already in the docstring is not erased, and when the default is None and stated as such in the docstring it is not included; operations such as 1/255 are not replaced by their value but put in backticks; a fake function provides arguments of various kinds to test get_default_description.
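For context, the docstring convention these utilities check looks like the following in practice; the function and its arguments are hypothetical and only illustrate the formats asserted in the test below.

def rescale(image, scale: float = 1 / 255, bits: int = 8):
    """
    Rescale the pixel values of an image (hypothetical example).

    Args:
        image (`np.ndarray`):
            The image to rescale.
        scale (`float`, *optional*, defaults to `1/255`):
            Rescaling factor applied to the pixel values; operations are kept in backticks.
        bits (`int`, *optional*, defaults to 8):
            Assumed bit depth of the input.
    """
    ...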
import inspect import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) from check_docstrings import get_default_description, replace_default_in_arg_description class CheckDostringsTested(unittest.TestCase): def test_replace_default_in_arg_description(self): desc_with_default = "`float`, *optional*, defaults to 2.0" self.assertEqual( replace_default_in_arg_description(desc_with_default, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_default, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_default, inspect._empty), "`float`") desc_with_default_typo = "`float`, `optional`, defaults to 2.0" self.assertEqual( replace_default_in_arg_description(desc_with_default_typo, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_default_typo, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*, defaults to 2.0" ) desc_with_default = "`float`, *optional*, defaults to None" self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*") desc_with_default = "`float`, *optional*, defaults to `None`" self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*") desc_with_default = "`float`, *optional*, defaults to 1/255" self.assertEqual( replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`" ) desc_with_default = "`float`, *optional*, defaults to `1/255`" self.assertEqual( replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`" ) desc_with_optional = "`float`, *optional*" self.assertEqual( replace_default_in_arg_description(desc_with_optional, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_optional, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_optional, None), "`float`, *optional*") self.assertEqual(replace_default_in_arg_description(desc_with_optional, inspect._empty), "`float`") desc_with_no_optional = "`float`" self.assertEqual( replace_default_in_arg_description(desc_with_no_optional, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_no_optional, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, None), "`float`, *optional*") self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, inspect._empty), "`float`") def test_get_default_description(self): def _fake_function(a, b: int, c=1, d: float = 2.0, e: str = "blob"): pass params = inspect.signature(_fake_function).parameters assert get_default_description(params["a"]) == "`<fill_type>`" assert get_default_description(params["b"]) == "`int`" assert get_default_description(params["c"]) == "`<fill_type>`, *optional*, defaults to 1" assert get_default_description(params["d"]) == "`float`, *optional*, defaults to 2.0" assert get_default_description(params["e"]) == '`str`, *optional*, defaults to `"blob"`'
Apache License, Version 2.0 header (coding: utf-8, copyright 2023 The HuggingFace Inc. team); the imports after the sys.path manipulation carry noqa: E402 markers.
import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info from get_test_info import ( get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py") BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class GetTestInfoTester(unittest.TestCase): def test_get_test_to_tester_mapping(self): bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE) blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"} EXPECTED_BLIP_MAPPING = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_test_mapping(self): bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE) blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_tester_mapping(self): bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE) blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
# We define the fixture function below; tests use it by referencing its name.
# The AWS default region is pinned so the SageMaker tests run against us-east-1.
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
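# For orientation, the minimal sketch below shows roughly how the `sm_env` fixture is consumed by
# the SageMaker test classes further down in this document. `ExampleSageMakerTest` is a hypothetical
# class written only for this illustration; in the real tests the `framework` attribute is supplied
# via `@parameterized_class` rather than set by hand.
import unittest

import pytest


@pytest.mark.usefixtures("sm_env")
class ExampleSageMakerTest(unittest.TestCase):
    framework = "pytorch"  # read by the sm_env fixture when building SageMakerTestEnvironment

    def test_environment_is_attached(self):
        # self.env is attached to the class by the sm_env fixture defined in the conftest above.
        assert self.env.framework == "pytorch"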
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning the library models for sequence classification on GLUE.

You can also adapt this script to your own text-classification task; pointers for this are left as
comments. The script imports the SageMaker model-parallelism specific `SageMakerTrainer` and
`SageMakerTrainingArguments` in place of the stock `Trainer`/`TrainingArguments`, and will error if
the minimal version of `transformers` is not installed (remove the check at your own risk).

`DataTrainingArguments` holds the arguments pertaining to what data we are going to feed the model
for training and eval, and `ModelArguments` the arguments pertaining to which model, config and
tokenizer we are going to fine-tune from. Using `HfArgumentParser` these classes are turned into
argparse arguments so they can be specified on the command line, or parsed from a single JSON file
when the script receives one `.json` argument. See all possible arguments in
`src/transformers/training_args.py` or by passing the `--help` flag to this script.

The main flow: detect the last checkpoint in `output_dir`, set up logging and the seed, load a GLUE
task from the Hub or local csv/json files (in distributed training, only one local process downloads
the dataset, model and vocab), infer the label list (or regression), align the model's label order
with the dataset where possible, tokenize the datasets with optional padding to `max_seq_length`,
pick the data collator (dynamic padding unless padding was already applied), train (resuming from a
checkpoint when available, and saving the tokenizer for easy upload), evaluate (looping to handle
the MNLI matched/mismatched sets), and optionally write test predictions to
`test_results_<task>.txt` after removing the label column, which contains -1 values the Trainer
won't like. `_mp_fn` is provided for `xla_spawn` (TPUs).
"""
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." ) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class ModelArguments: model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if training_args.should_log else logging.WARN) logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) if training_args.should_log: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") set_seed(training_args.seed) if data_args.task_name is not None: datasets = load_dataset("glue", data_args.task_name) else: data_files = {"train": data_args.train_file, "validation": data_args.validation_file} if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." 
data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): datasets = load_dataset("csv", data_files=data_files) else: datasets = load_dataset("json", data_files=data_files) if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: label_list = datasets["train"].unique("label") label_list.sort() num_labels = len(label_list) config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None if data_args.pad_to_max_length: padding = "max_length" else: padding = False label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() trainer.log_metrics("train", metrics) trainer.save_metrics("train", 
metrics) trainer.save_state() if training_args.do_eval: logger.info("*** Evaluate ***") tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): main() if __name__ == "__main__": main()
# Hyperparameters sent by the client are passed as command-line arguments to the script. The batch
# size is overwritten (until we have a `tf_glue.py`), logging is set up, the model and tokenizer are
# loaded, the imdb dataset is loaded and shrunk (5k train / 500 test samples), both splits are
# preprocessed, and the optimizer and loss are defined before training.
import argparse import logging import sys import time import tensorflow as tf from datasets import load_dataset from transformers import AutoTokenizer, TFAutoModelForSequenceClassification if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--epochs", type=int, default=1) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) args, _ = parser.parse_known_args() args.per_device_train_batch_size = 16 args.per_device_eval_batch_size = 16 logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) train_dataset = train_dataset.shuffle().select(range(5000)) test_dataset = test_dataset.shuffle().select(range(500)) train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])).batch( args.per_device_train_batch_size ) test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])).batch( args.per_device_eval_batch_size ) optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) start_train_time = time.time() train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.per_device_train_batch_size) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") for key, value in train_results.history.items(): logger.info(f" {key} = {value}")
# The helpers below load and preprocess the imdb train/test datasets. Hyperparameters sent by the
# client are passed as command-line arguments to the script, together with the data, model and
# output directories. After logging is set up, the model and tokenizer are loaded, the datasets are
# built, the optimizer and loss are defined, training runs (the plain Keras call
# `train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size)`
# is the non-distributed equivalent of the custom loop), evaluation follows, and results are saved.
import argparse import logging import os import sys import time import tensorflow as tf from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, TFAutoModelForSequenceClassification from transformers.utils import is_sagemaker_dp_enabled if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled(): SDP_ENABLED = True os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge" import smdistributed.dataparallel.tensorflow as sdp else: SDP_ENABLED = False def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None): pbar = tqdm(train_dataset) for i, batch in enumerate(pbar): with tf.GradientTape() as tape: inputs, targets = batch outputs = model(batch) loss_value = loss(targets, outputs.logits) if SDP_ENABLED: tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True) grads = tape.gradient(loss_value, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) pbar.set_description(f"Loss: {loss_value:.4f}") if SDP_ENABLED and i == 0: sdp.broadcast_variables(model.variables, root_rank=0) sdp.broadcast_variables(opt.variables(), root_rank=0) if max_steps and i >= max_steps: break train_results = {"loss": loss_value.numpy()} return train_results def get_datasets(tokenizer, train_batch_size, eval_batch_size): train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])) test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])) if SDP_ENABLED: tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank()) tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank()) tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True) tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True) return tf_train_dataset, tf_test_dataset if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) parser.add_argument("--max_steps", type=int, default=None) parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) args, _ = parser.parse_known_args() logger = 
logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) if SDP_ENABLED: sdp.init() gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU") model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) tf_train_dataset, tf_test_dataset = get_datasets( tokenizer=tokenizer, train_batch_size=args.per_device_train_batch_size, eval_batch_size=args.per_device_eval_batch_size, ) optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) if args.do_train: start_train_time = time.time() train_results = fit( model, loss, optimizer, tf_train_dataset, args.epochs, args.per_device_train_batch_size, max_steps=args.max_steps, ) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") output_eval_file = os.path.join(args.output_dir, "train_results.txt") if not SDP_ENABLED or sdp.rank() == 0: with open(output_eval_file, "w") as writer: logger.info("***** Train results *****") logger.info(train_results) for key, value in train_results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0): result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True) logger.info("*** Evaluate ***") output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") logger.info(result) for key, value in result.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") if SDP_ENABLED: if sdp.rank() == 0: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) else: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir)
# Distributed data-parallel settings and estimator creation (the original comments note a
# `parameterized.expand` over 2 and 4 instances), followed by the training run, the result
# dataframe, KPI extraction (the train time reported by the SageMaker job includes starting,
# preprocessing and stopping), KPI assertions, and dumping the test result into a JSON file to
# share in the PR.
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def test_script(self, instance_count): estimator = self.create_estimator(instance_count) estimator.fit() result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
# Configuration for running training on smdistributed model parallel, estimator creation, the
# training run, the result dataframe, KPI extraction (the train time reported by the SageMaker job
# includes starting, preprocessing and stopping), KPI assertions, and dumping the test result into
# a JSON file to share in the PR.
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): mpi_options = { "enabled": True, "processes_per_host": 8, } smp_options = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer" return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(1,)]) def test_scripz(self, instance_count): estimator = self.create_estimator(instance_count) estimator.fit() result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
# Estimator creation, the training run, the result dataframe, KPI extraction (the train time
# reported by the SageMaker job includes starting, preprocessing and stopping), KPI assertions, and
# dumping the test result into a JSON file to share in the PR.
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class SingleNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count=1): return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def test_glue(self): estimator = self.create_estimator() estimator.fit() result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0.

Test the default config. Test that out_features and out_indices are correctly set: out_features and out_indices both None; both set; only out_features set; only out_indices set; and an error is raised when out_indices do not correspond to out_features. signature.parameters is an OrderedDict, so arg_names order is deterministic. Check that the output of the last stage is taken if out_features=None and out_indices=None. Check the backbone can be initialized with fresh weights. Verify num_features has been initialized in the backbone init. Test default outputs and verify feature maps; test output_hidden_states=True; test output_attentions=True.
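A short sketch of the out_features/out_indices alignment described above, using ResNetConfig as a representative backbone config (any backbone config with stage_names would behave the same; the printed values in the comments reflect ResNet's default four stages):

from transformers import ResNetConfig

# Only out_features given: out_indices is derived from the position of each stage name.
config = ResNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)  # e.g. ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # e.g. [2, 4]

# Neither given: the last stage is used.
config = ResNetConfig()
print(config.out_features, config.out_indices)  # e.g. ['stage4'] [4]

# Mismatched values raise a ValueError.
try:
    ResNetConfig(out_features=["stage1"], out_indices=[2])
except ValueError as err:
    print(err)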
import copy import inspect import tempfile from transformers.testing_utils import require_torch, torch_device from transformers.utils.backbone_utils import BackboneType @require_torch class BackboneTesterMixin: all_model_classes = () has_attentions = True def test_config(self): config_class = self.config_class config = config_class() self.assertIsNotNone(config) num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers expected_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_stages + 1)] self.assertEqual(config.stage_names, expected_stage_names) self.assertTrue(set(config.out_features).issubset(set(config.stage_names))) config = config_class(out_features=None, out_indices=None) self.assertEqual(config.out_features, [config.stage_names[-1]]) self.assertEqual(config.out_indices, [len(config.stage_names) - 1]) config = config_class(out_features=["stem", "stage1"], out_indices=[0, 1]) self.assertEqual(config.out_features, ["stem", "stage1"]) self.assertEqual(config.out_indices, [0, 1]) config = config_class(out_features=["stage1", "stage3"]) self.assertEqual(config.out_features, ["stage1", "stage3"]) self.assertEqual(config.out_indices, [1, 3]) config = config_class(out_indices=[0, 2]) self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]]) self.assertEqual(config.out_indices, [0, 2]) with self.assertRaises(ValueError): config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2]) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_config_save_pretrained(self): config_class = self.config_class config_first = config_class(out_indices=[0, 1, 2, 3]) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) self.assertEqual(config_second.to_dict(), config_first.to_dict()) def test_channels(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertEqual(len(model.channels), len(config.out_features)) num_features = model.num_features out_indices = [config.stage_names.index(feat) for feat in config.out_features] out_channels = [num_features[idx] for idx in out_indices] self.assertListEqual(model.channels, out_channels) new_config = copy.deepcopy(config) new_config.out_features = None model = model_class(new_config) self.assertEqual(len(model.channels), 1) self.assertListEqual(model.channels, [num_features[-1]]) new_config = copy.deepcopy(config) new_config.out_indices = None model = model_class(new_config) self.assertEqual(len(model.channels), 1) self.assertListEqual(model.channels, [num_features[-1]]) def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_features)) self.assertEqual(len(model.channels), len(config.out_features)) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), 
len(config.out_indices)) modified_config = copy.deepcopy(config) modified_config.out_features = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) def test_backbone_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for backbone_class in self.all_model_classes: backbone = backbone_class(config) self.assertTrue(hasattr(backbone, "backbone_type")) self.assertTrue(hasattr(backbone, "stage_names")) self.assertTrue(hasattr(backbone, "num_features")) self.assertTrue(hasattr(backbone, "out_indices")) self.assertTrue(hasattr(backbone, "out_features")) self.assertTrue(hasattr(backbone, "out_feature_channels")) self.assertTrue(hasattr(backbone, "channels")) self.assertIsInstance(backbone.backbone_type, BackboneType) self.assertIsNotNone(backbone.num_features) self.assertTrue(len(backbone.channels) == len(backbone.out_indices)) self.assertTrue(len(backbone.stage_names) == len(backbone.num_features)) self.assertTrue(len(backbone.channels) <= len(backbone.num_features)) self.assertTrue(len(backbone.out_feature_channels) == len(backbone.stage_names)) def test_backbone_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() batch_size = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: backbone = backbone_class(config) backbone.to(torch_device) backbone.eval() outputs = backbone(**inputs_dict) self.assertIsInstance(outputs.feature_maps, tuple) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels): self.assertEqual(feature_map.shape[:2], (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) outputs = backbone(**inputs_dict, output_hidden_states=True) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names)) for hidden_state, n_channels in zip(outputs.hidden_states, backbone.channels): self.assertTrue(hidden_state.shape[:2], (batch_size, n_channels)) if self.has_attentions: outputs = backbone(**inputs_dict, output_attentions=True) self.assertIsNotNone(outputs.attentions)
Copyright 2019 HuggingFace Inc. Licensed under the Apache License, Version 2.0.

Add common fields for text models. Test that the config exposes the common properties as getters. Test that the config exposes the common properties as setters; some models might not be able to implement setters for common_properties, in which case a NotImplementedError is raised. Test that the config class can be called with each config prop_name; again, a NotImplementedError may be raised for properties a model cannot set.
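The ConfigTester defined just below is typically wired into a model's config test like this. A rough sketch only: BertConfig, the hidden_size=37 override, and the import path for ConfigTester are illustrative assumptions, not fixed conventions.

import unittest

from transformers import BertConfig

from test_configuration_common import ConfigTester  # assumed import path for the helper below


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        # extra kwargs become self.inputs_dict and are forwarded to every config_class(...) call
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        # runs the getter/setter, JSON, save/load and num_labels checks implemented below
        self.config_tester.run_common_tests()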
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class ConfigTester(object): def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs): self.parent = parent self.config_class = config_class self.has_text_modality = has_text_modality self.inputs_dict = kwargs self.common_properties = common_properties def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) common_properties = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) if self.has_text_modality: common_properties.extend(["vocab_size"]) for prop in common_properties: self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist") for idx, name in enumerate(common_properties): try: setattr(config, name, idx) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: pass for idx, name in enumerate(common_properties): try: config = self.config_class(**{name: idx}) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: pass def create_and_test_config_to_json_string(self): config = self.config_class(**self.inputs_dict) obj = json.loads(config.to_json_string()) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key], value) def create_and_test_config_to_json_file(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "config.json") config_first.to_json_file(json_file_path) config_second = self.config_class.from_json_file(json_file_path) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_from_and_save_pretrained(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) with self.parent.assertRaises(OSError): self.config_class.from_pretrained(f".{tmpdirname}") def create_and_test_config_from_and_save_pretrained_subfolder(self): config_first = self.config_class(**self.inputs_dict) subfolder = "test" with tempfile.TemporaryDirectory() as tmpdirname: sub_tmpdirname = os.path.join(tmpdirname, subfolder) config_first.save_pretrained(sub_tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_with_num_labels(self): config = self.config_class(**self.inputs_dict, num_labels=5) self.parent.assertEqual(len(config.id2label), 5) self.parent.assertEqual(len(config.label2id), 5) config.num_labels = 3 self.parent.assertEqual(len(config.id2label), 3) self.parent.assertEqual(len(config.label2id), 3) def check_config_can_be_init_without_params(self): if self.config_class.is_composition: with self.parent.assertRaises(ValueError): config = self.config_class() else: config = self.config_class() self.parent.assertIsNotNone(config) def check_config_arguments_init(self): kwargs = copy.deepcopy(config_common_kwargs) config = self.config_class(**kwargs) wrong_values = [] for key, 
value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.float16: wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16)) elif getattr(config, key) != value: wrong_values.append((key, getattr(config, key), value)) if len(wrong_values) > 0: errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values]) raise ValueError(f"The following keys were not properly set in the config:\n{errors}") def run_common_tests(self): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
Copyright 2019 HuggingFace Inc. Licensed under the Apache License, Version 2.0.

Reset repo; push to hub via save_pretrained (for both the user namespace and the organization repos). This has added the proper auto_map field to the config; can't make an isinstance check because the new_config comes from the CustomConfig class of a dynamic module. Attempt to modify each of the int/float/bool/str config records and verify they were updated; if this part of the test fails, you have arguments to add in config_common_kwargs above. The config is in a subfolder: the following should not work without specifying the subfolder. A mock response for an HTTP HEAD request to emulate the server being down; download this model to make sure it's in the cache; under the mock environment we get a 500 error when trying to reach the model; this checks we did call the fake HEAD request. The URL-loading test is for deprecated behavior and can be removed in v5. This should pick the new configuration file, as the version of transformers is >= 4.0.0 (will need to be adjusted if we reach v42 and this test is still here); it should pick the old configuration file when the version of transformers is < 4.42.0; the test repo has two configuration files, one for v4.0.0 and above with a different hidden size. This checks _configuration_file is not kept in the kwargs by mistake. Testing an older version by monkey-patching the version in the module where it's used.
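The "server down" scenario described above boils down to patching the HTTP layer; a minimal sketch that mirrors the test below (the tiny-random-bert checkpoint is the one the test itself uses):

import unittest.mock as mock

from requests.exceptions import HTTPError
from transformers import BertConfig

# A mock response standing in for an unreachable Hub (HTTP 500 on every request).
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

# First call populates the local cache while the network is up.
_ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

# With every request failing, from_pretrained must fall back to the cached files.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
    mock_head.assert_called()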
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPT2Config from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig config_common_kwargs = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-config-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-config") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("test-config", token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) delete_repo(token=self._token, repo_id="test-config") with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org", token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) 
delete_repo(token=self._token, repo_id="valid_org/test-config-org") with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_dynamic_config(self): CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) config.push_to_hub("test-dynamic-config", token=self._token) self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True) self.assertEqual(new_config.__class__.__name__, "CustomConfig") self.assertEqual(new_config.attribute, 42) class ConfigTestUtils(unittest.TestCase): def test_config_from_string(self): c = GPT2Config() n_embd = c.n_embd + 1 resid_pdrop = c.resid_pdrop + 1.0 scale_attn_weights = not c.scale_attn_weights summary_type = c.summary_type + "foo" c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd") self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop") self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights") self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type") def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] self.assertListEqual( missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f" {', '.join(keys_with_defaults)}." 
) def test_nested_config_load_from_dict(self): config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-CLIPModel", text_config={"num_hidden_layers": 2} ) self.assertNotIsInstance(config.text_config, dict) self.assertEqual(config.text_config.__class__.__name__, "CLIPTextConfig") def test_from_pretrained_subfolder(self): with self.assertRaises(OSError): _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert") self.assertIsNotNone(config) def test_cached_files_are_used_when_internet_is_down(self): response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") mock_head.assert_called() def test_legacy_load_from_url(self): _ = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def test_local_versioning(self): configuration = AutoConfig.from_pretrained("bert-base-cased") configuration.configuration_files = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(tmp_dir) configuration.hidden_size = 2 json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 2) configuration.configuration_files = ["config.42.0.0.json"] configuration.hidden_size = 768 configuration.save_pretrained(tmp_dir) shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 768) def test_repo_versioning_before(self): repo = "hf-internal-testing/test-two-configs" import transformers as new_transformers new_transformers.configuration_utils.__version__ = "v4.0.0" new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained( repo, return_unused_kwargs=True ) self.assertEqual(new_configuration.hidden_size, 2) self.assertDictEqual(kwargs, {}) import transformers as old_transformers old_transformers.configuration_utils.__version__ = "v3.0.0" old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo) self.assertEqual(old_configuration.hidden_size, 768)
Copyright 2021 HuggingFace Inc. Licensed under the Apache License, Version 2.0.
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class FeatureExtractionSavingTestMixin: test_cast_dtype = None def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) obj = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key], value) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_init_without_params(self): feat_extract = self.feature_extraction_class() self.assertIsNotNone(feat_extract)
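A standalone illustration of the round trips the mixin above checks, using Wav2Vec2FeatureExtractor as a concrete feature extractor (default attribute values only; nothing model-specific is assumed):

import tempfile

from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000)

# JSON string round trip: every attribute should survive serialization.
as_json = feature_extractor.to_json_string()

# save_pretrained / from_pretrained round trip via a temporary directory.
with tempfile.TemporaryDirectory() as tmp_dir:
    feature_extractor.save_pretrained(tmp_dir)
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained(tmp_dir)

assert reloaded.to_dict() == feature_extractor.to_dict()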
Copyright 2021 HuggingFace Inc. Licensed under the Apache License, Version 2.0.

A mock response for an HTTP HEAD request to emulate the server being down; download this model to make sure it's in the cache; under the mock environment we get a 500 error when trying to reach the model; this checks we did call the fake HEAD request. The URL-loading test is for deprecated behavior and can be removed in v5. Reset repo; push to hub via save_pretrained. This has added the proper auto_map field to the config; can't make an isinstance check because the new_feature_extractor comes from the CustomFeatureExtractor class of a dynamic module.
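A hedged sketch of the push-to-Hub flow these tests exercise; the repo name, username and token below are placeholders, and a write-access token is required for this to run at all:

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor()

# Placeholder repo name and token; the tests use a staging endpoint and clean the repo up afterwards.
feature_extractor.push_to_hub("my-test-feature-extractor", token="hf_xxx")

# Reloading by repo id should reproduce the same attributes.
reloaded = AutoFeatureExtractor.from_pretrained("your-username/my-test-feature-extractor")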
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") class FeatureExtractorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") mock_head.assert_called() def test_legacy_load_from_url(self): _ = Wav2Vec2FeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-feature-extractor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor") except HTTPError: pass def test_push_to_hub(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) delete_repo(token=self._token, repo_id="test-feature-extractor") with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_in_organization(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("valid_org/test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor") with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def 
test_push_to_hub_dynamic_feature_extractor(self): CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-dynamic-feature-extractor", token=self._token) self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) new_feature_extractor = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True ) self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
Copyright 2023 HuggingFace Inc. Licensed under the Apache License, Version 2.0.

prepare_image_inputs prepares a list of PIL images, a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True; one can specify whether the images are of the same resolution or not. To avoid getting an image width/height of 0, the image needs to have width >= size_divisor when size_divisor is defined. PIL expects the channel dimension as the last dimension. prepare_video prepares a single video as a list of PIL images, numpy arrays or PyTorch tensors, and prepare_video_inputs prepares a batch of videos (a list of lists) under the same numpify/torchify options, with equal or varying resolutions.

Comments from ImageProcessingTestMixin: initialize the image processor; create random PyTorch tensors; for LayoutLM compatibility; try with a text + image feature; initialize the image processing class; create random PIL images / numpy tensors / PyTorch tensors; test non-batched input; test batched input; test that images which have an arbitrary number of channels can be processed.
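A small sketch of how the prepare_image_inputs helper defined just below can feed an image processor; ViTImageProcessor and the 18x18 target size are only illustrative choices, and the printed shape in the comment is the expected result under those choices:

from transformers import ViTImageProcessor

# Build a batch of 4 random channels-first numpy images with varying resolutions.
image_inputs = prepare_image_inputs(
    batch_size=4, min_resolution=30, max_resolution=40, num_channels=3, numpify=True
)

image_processor = ViTImageProcessor(size={"height": 18, "width": 18})
pixel_values = image_processor(image_inputs, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([4, 3, 18, 18])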
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import numpy as np import torch if is_vision_available(): from PIL import Image def prepare_image_inputs( batch_size, min_resolution, max_resolution, num_channels, size_divisor=None, equal_resolution=False, numpify=False, torchify=False, ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" image_inputs = [] for i in range(batch_size): if equal_resolution: width = height = max_resolution else: if size_divisor is not None: min_resolution = max(size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) image_inputs.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs] if torchify: image_inputs = [torch.from_numpy(image) for image in image_inputs] return image_inputs def prepare_video(num_frames, num_channels, width=10, height=10, numpify=False, torchify=False): video = [] for i in range(num_frames): video.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] if torchify: video = [torch.from_numpy(frame) for frame in video] return video def prepare_video_inputs( batch_size, num_frames, num_channels, min_resolution, max_resolution, equal_resolution=False, numpify=False, torchify=False, ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(batch_size): if equal_resolution: width = height = max_resolution else: width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) video = prepare_video( num_frames=num_frames, num_channels=num_channels, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs class ImageProcessingTestMixin: test_cast_dtype = None def test_image_processor_to_json_string(self): image_processor = self.image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): self.assertEqual(obj[key], value) def test_image_processor_to_json_file(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "image_processor.json") image_processor_first.to_json_file(json_file_path) image_processor_second = self.image_processing_class.from_json_file(json_file_path) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) def test_image_processor_from_and_save_pretrained(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = image_processor_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) def test_init_without_params(self): image_processor = self.image_processing_class() 
self.assertIsNotNone(image_processor) @require_torch @require_vision def test_cast_dtype_device(self): if self.test_cast_dtype is not None: image_processor = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) encoding = image_processor(image_inputs, return_tensors="pt") self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float32) encoding = image_processor(image_inputs, return_tensors="pt").to(torch.float16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float16) encoding = image_processor(image_inputs, return_tensors="pt").to("cpu", torch.bfloat16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.bfloat16) with self.assertRaises(TypeError): _ = image_processor(image_inputs, return_tensors="pt").to(torch.bfloat16, "cpu") encoding = image_processor(image_inputs, return_tensors="pt") encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])}) encoding = encoding.to(torch.float16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float16) self.assertEqual(encoding.input_ids.dtype, torch.long) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) 
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) def test_call_numpy_4_channels(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) encoded_images = image_processor( image_inputs[0], return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1, ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processor( image_inputs, return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1, ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) )
Copyright 2023 HuggingFace Inc. Licensed under the Apache License, Version 2.0.

A mock response for an HTTP HEAD request to emulate the server being down; download this model to make sure it's in the cache; under the mock environment we get a 500 error when trying to reach the model; this checks we did call the fake HEAD request. The URL-loading test is for deprecated behavior and can be removed in v5. The config is in a subfolder: the following should not work without specifying the subfolder. Reset repo; push to hub via save_pretrained. This has added the proper auto_map field to the config; can't make an isinstance check because the new_image_processor comes from the CustomImageProcessor class of a dynamic module.
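A short sketch of the subfolder behaviour mentioned above, mirroring the assertions in the test below (the repo id is the one that test uses):

from transformers import AutoImageProcessor

# Without the subfolder argument the preprocessor config cannot be found and an OSError is raised.
try:
    AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
except OSError as err:
    print("expected failure:", err)

# Pointing at the subfolder that actually holds preprocessor_config.json works.
image_processor = AutoImageProcessor.from_pretrained(
    "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
)
print(type(image_processor).__name__)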
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa: E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class
        # of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Inline comments from the image-transform tests below:
# - make sure image is correctly rescaled (forward, and back again)
# - make sure that an exception is raised if image is not in [0, 1]
# - make sure a binary mask remains a binary mask
# - channels_first / channels_last, "channels first" / "channels last", "channel first" / "channel last"
# - test that the function doesn't reorder if the channel dim matches the input
# - test that the function reorders if the channel dim doesn't match the input
# - can pass in input_data_format; works if the data format is ambiguous or unknown
# - test the output size defaults to (x, x) if an int is given
# - test the output size is the same as the input if a two-element tuple/list is given
# - to match PyTorch behaviour, max_size is only relevant if size is an int
# - test output size = (int(size * height / width), size) if size is an int and height > width
# - test output size = (size, int(size * width / height)) if size is an int and width > height
# - test size is resized if the longer size > max_size
# - test output size when size is an int, height > width and the input has 4 channels
# - test the correct channel dimension is returned if output height == 3
# - defaults to input format (channels first / channels last)
# - check the channel order is the same by default; check the channel order is changed if specified
# - check a PIL.Image.Image is returned if return_numpy=False; PIL size is in (width, height) order
# - check an image with float values between 0-1 is returned with values in this range
# - check that an image with 4 channels is resized correctly
# - test that an exception is raised if inputs are incorrect: not a numpy array image,
#   number of mean values != number of channels, number of std values != number of channels
# - test the result is correct: output data format is channels_first and normalization correctly computed
# - test an image with 4 channels is normalized correctly
# - test float32 image input keeps float32 dtype; test float16 image input keeps float16 dtype
#   (the mean and std are cast to match the dtype of the input image)
# - test int image input is converted to float32
# - test the result is correct: output data format is channels_first and center crop correctly computed
# - test that the image is padded with zeros if the crop size is larger than the image size
# - test an image with 4 channels is cropped correctly
# - check that center_to_corners_format and corners_to_center_format are inverses of each other
# - test list / numpy array / int / array input for rgb_to_id and id_to_rgb
# - test that an exception is raised if an unknown padding mode is specified
# - test that an exception is raised if invalid padding is specified (cannot pad on the channel dimension)
# - test the image is padded equally on all sides if padding is an int
# - test the left and right of each axis is padded (pad_left, pad_right); test only one axis is padded
# - test padding with a constant value
# - test padding with PaddingMode.REFLECT, PaddingMode.REPLICATE and PaddingMode.SYMMETRIC
# - test we can specify the output data format
# - test we can pad an image with 2 channels
# - test that an RGBA image is converted to RGB
#   (for the moment, numpy images are returned as is and PIL images are converted)
# - test that a grayscale image is converted to RGB
# - flip_channel_order: can flip when the image has 2 channels
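A minimal sketch of the padding behaviours the comments above describe, using the same pad signature and example values the tests below check; only the example arrays are made up.

import numpy as np

from transformers.image_transforms import pad

image = np.array([[[0, 1], [2, 3]]])  # one channel, channels_first

padded = pad(image, 1)                                    # an int pads all spatial sides with zeros -> (1, 4, 4)
padded = pad(image, ((2, 1), (0, 0)), constant_values=9)  # ((before_h, after_h), (before_w, after_w))

image3 = np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]])
padded = pad(image3, ((0, 2), (2, 1)), mode="reflect")    # mirror existing values instead of a constant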
import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_tf def test_to_pil_image_from_tensorflow(self): image = tf.random.uniform((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) image = tf.random.uniform((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_torch def test_to_pil_image_from_torch(self): image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) 
self.assertEqual(pil_image.size, (5, 4)) image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_flax def test_to_pil_image_from_jax(self): key = jax.random.PRNGKey(0) image = jax.random.uniform(key, (3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) image = jax.random.uniform(key, (4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) 
self.assertEqual(resized_image.shape, (30, 40, 3)) resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) self.assertEqual(resized_image.size, (40, 30)) image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = np.random.randint(0, 256, (224, 224, 3)) / 255 with self.assertRaises(ValueError): normalize(5, 5, 5) with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) with self.assertRaises(ValueError): center_crop(image, 10) expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) self.assertTrue(np.allclose(cropped_image, 
expected_image)) image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): image = np.array([[ [0, 1], [2, 3], ]]) with self.assertRaises(ValueError): pad(image, 10, mode="unknown") with self.assertRaises(ValueError): pad(image, (5, 10, 10)) expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) self.assertTrue(np.allclose(expected_image, pad(image, 1))) expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 5, 4], [2, 1, 0, 1, 2, 1], ]]) self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) image = 
np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) img_channels_last = np.moveaxis(img_channels_first, 0, -1) flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
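As a usage note, a minimal sketch chaining the transforms exercised above into a tiny preprocessing pipeline; the image values and target size are arbitrary, while the function signatures are the ones used in the tests.

import numpy as np

from transformers.image_transforms import normalize, resize, to_channel_dimension_format

image = np.random.rand(224, 224, 3).astype(np.float32)  # channels_last, values in [0, 1]

resized = resize(image, (30, 40))                                           # stays channels_last: (30, 40, 3)
normalized = normalize(resized, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # per-channel normalization
pixel_values = to_channel_dimension_format(normalized, "channels_first")    # (3, 30, 40)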
codingutf8 2019 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license use the first letter of the name to get a value and go from a 13 to z 12 prune heads if needed initialize weights tie weights should be skipped when not initializing all weights since frompretrained calls tie weights anyways make sure we don t have nans the config file and the generation config file if it can generate should be saved check the keys are in the original statedict check that certain keys didn t get saved with the model test we can load the state dict in the model necessary for the checkpointing api in trainer at init model should have gradient checkpointing disabled check enable works loop over all modules and check that relevant modules have gradientcheckpointing set to true check disable works loop over all modules and check that relevant modules have gradientcheckpointing set to false make a copy of model class to not break future tests from https stackoverflow comquestions9541025howtocopyapythonclass make sure that all keys are expected for test make init deterministic but make sure that noninitialized weights throw errors nevertheless this will often delete a single weight of a multiweight module to test an edge case check that certain keys didn t get saved with the model before we test anything make a copy of model class to not break future tests from https stackoverflow comquestions9541025howtocopyapythonclass make sure that all keys are expected for test make init deterministic but make sure that noninitialized weights throw errors nevertheless this will often delete a single weight of a multiweight module to test an edge case check that certain keys didn t get saved with the model signature parameters is an ordereddict so argnames order is deterministic unfreeze additional layers scenario 1 default behaviour scenario 2 with usereentranttrue this is the default value that is used in pytorch s torch utils checkpoint checkpoint scenario 3 with usereentrantfalse pytorch suggests users to use this value for future releases https pytorch orgdocsstablecheckpoint html check that outputattentions also work using config loss is at first position question answering model returns startlogits and endlogits decoder attentions cross attentions check attention is always last and order is fine this is copied from torchtestinginternaljitutils py clearclassregistry torch 1 8 has no clearclassstate in torch jit state avoid memory leak without this each call increase ram usage by 20mb even with this call there are still memory leak by 0 04mb test that the model can be serialized and restored properly avoid memory leak without this each call increase ram usage by 20mb even with this call there are still memory leak by 0 04mb prepare headmask set requiregrad after having prepared the tensor to avoid error leaf variable has been moved into the graph interior test that we can get a gradient back for importance score computation remove nan todo to have this check we will need at least 3 layers do we really need it self assertequalattentions1 shape3 self modeltester 
numattentionheads todo to have this check we will need at least 3 layers do we really need it self assertequalattentions1 shape3 self modeltester numattentionheads todo to have this check we will need at least 3 layers do we really need it self assertequalattentions1 shape3 self modeltester numattentionheads check that outputhiddenstates also work using config no need to test all models as different heads yield the same functionality seq2seq models encoderdecoderonly models retrieve the embeddings and clone theme check that resizing the position embeddings with a larger maxpositionembeddings increases the model s postion embeddings size check that it actually resizes the embeddings matrix check that the model can still do a forward pass successfully every parameter should be resized check that resizing the position embeddings with a smaller maxpositionembeddings decreases the model s maxpositionembeddings check that it actually resizes the embeddings matrix check that the model can still do a forward pass successfully every parameter should be resized check that adding and removing tokens has not modified the first part of the embedding matrix retrieve the embeddings and clone theme check that resizing the token embeddings with a larger vocab size increases the model s vocab size check that it actually resizes the embeddings matrix check that the model can still do a forward pass successfully every parameter should be resized check that resizing the token embeddings with a smaller vocab size decreases the model s vocab size check that it actually resizes the embeddings matrix check that the model can still do a forward pass successfully every parameter should be resized input ids should be clamped to the maximum size of the vocabulary make sure that decoderinputids are resized as well check that adding and removing tokens has not modified the first part of the embedding matrix check that resizing a model to a multiple of padtomultiple leads to a model of exactly that size if model cannot untied embeddings leave test if no output embeddings leave test check that resizing the token embeddings with a larger vocab size increases the model s vocab size check bias if present check that the model can still do a forward pass successfully every parameter should be resized check that resizing the token embeddings with a smaller vocab size decreases the model s vocab size check that it actually resizes the embeddings matrix check bias if present check that the model can still do a forward pass successfully every parameter should be resized input ids should be clamped to the maximum size of the vocabulary check that the model can still do a forward pass successfully every parameter should be resized the main input is the name of the argument after self some models define this as none in that case we are on a head model but every single key is not actual parameters and this is tested in testtiedmodelweightskeyignore test check that the embedding layer and decoding layer are the same in size and in value self asserttruechecksamevaluesembeddings decoding check that after modification they remain the same embeddings weight data div2 check that the embedding layer and decoding layer are the same in size and in value self asserttrueembeddings weight shape decoding weight shape self asserttruechecksamevaluesembeddings decoding check that after modification they remain the same decoding weight data div4 check that the embedding layer and decoding layer are the same in size and in value self 
asserttrueembeddings weight shape decoding weight shape self asserttruechecksamevaluesembeddings decoding check that after resize they remain tied decoding weight data mul20 check that the embedding layer and decoding layer are the same in size and in value self asserttruemodel transformer wte weight shape model lmhead weight shape self asserttruechecksamevaluesmodel transformer wte model lmhead checking the state dicts are correct checking there was no complain of missing weights checking the tensor sharing are correct checking the state dicts are correct checking there was no complain of missing weights these are all the pointers of shared tensors detect we get a hit for each key removed tied weights found from tied params there should only be one left after we are nuking all weights on file so every parameter should yell on load we re going to detect if we yell too much or too little remove tied weights from extra missing they are normally not warned as missing if their tied counterpart is present but here there are no weights at all so we do get the warning we remove the group from extramissing if not all weights from group are in it remove nonpersistent buffers from missedmissing don t copy this method to model specific test file todo remove this method once the issues are all fixed make sure no sequence has all zeros as attention mask for k in attentionmask encoderattentionmask decoderattentionmask if k in inputsdict attentionmask inputsdictk make sure no all 0s attention masks to avoid failure at this moment put 1 at the beginning of sequences to make it still work when combining causal attention masks todo remove this line once a fix regarding large negative values for attention mask is done attentionmask torch cat torch oneslikeattentionmask 1 dtypeattentionmask dtype attentionmask 1 dim1 here we make the first sequence with all 0s as attention mask currently this will fail for tfwav2vec2model this is caused by the different large negative values like 1e4 1e9 1e30 and inf for attention mask across modelsframeworks todo enable this block once the large negative values thing is cleaned up see https github comhuggingfacetransformersissues14859 attentionmask torch cat torch zeroslikeattentionmask 1 dtypeattentionmask dtype attentionmask1 dim0 inputsdictk attentionmask don t copy this method to model specific test file todo remove this method once the issues are all fixed def postprocessingtoignoretestcasesself tfoutputs ptoutputs modelclass tfgpt2 has pastkeyvalues as a tensor while gpt2 has it as a tuple create new outputs from the remaining fields copied from tests testmodelingtfcommon tfmodeltestermixin checkpttfoutputs check the outputs from pytorch and tensorflow models are close enough checks are done in a recursive way args modelclass the class of the model that is currently testing for example tfbertmodel tfbertformaskedlm tfbertforsequenceclassification etc mainly used for providing more informative error messages name str the name of the output for example output hiddenstates output attentions etc attributes tuplestr the names of the output s element if the output is a tuplelist with each element being a named field in the output allow modeloutput e g clipoutput has textmodeloutput and visionmodeloutput don t copy this block to model specific test file todo remove this method and this line after issues are fixed convert to the case of tuple appending each key to the current string name allow list e g transfoxlmodeloutput mems is a list of tensors case 1 each output has 
assigned name e g a tuple form of a modeloutput case 2 each output has no assigned name e g hidden states of each layer add an index to name deal with numpy s scalars to make replacing nan values by 0 work skip key that does not exist in tf other general float inputs send pytorch inputs to the correct device send pytorch model to the correct device check predictions on first output logitshiddenstates are close enough given lowlevel computational differences tf models returned loss is usually a tensor rather than a scalar see hfcomputeloss it uses tf keras losses reduction none change it here to a scalar to match pytorch models loss transformers does not have this model in tf version yet output all for aggressive testing make sure no sequence has all zeros as attention mask otherwise some tests fail due to the inconsistency of the usage 1e4 1e9 1e30 inf todo use a uniform value for all models make sure all tests pass without this processing and remove it not all models accept labels in the forward pass yet make sure only tf inputs are forward that actually exist in function args remove all head masks for some models e g base models there is no label returned set the input dict to none to avoid check outputs twice for the same input dicts check we can load pt model in tf and viceversa with model model functions here requires tfinputsdict to build tfmodel original test check without labels check with labels check we can load pt model in tf and viceversa with checkpoint model functions original test check without labels check with labels args modelclass the class of the model that is currently testing for example etc currently unused but it could make debugging easier and faster names a string or a list of strings these specify what fxoutputsptoutputs represent in the model outputs currently unused but in the future we could use this information to make the error message clearer by giving the names of the output tensors with large differences between pt and flax allow modeloutput e g clipoutput has textmodeloutput and visionmodeloutput convert to the case of tuple appending each key to the current string name allow list e g transfoxlmodeloutput mems is a list of tensors case 1 each output has assigned name e g a tuple form of a modeloutput case 2 each output has no assigned name e g hidden states of each layer add an index to name using np asarray gives valueerror assignment destination is readonly at the line fxoutputsfxnans 0 deal with numpy s scalars to make replacing nan values by 0 work no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the usecache option and cache is not returned as a default so we disable usecache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax send pytorch model to the correct device no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the usecache option and cache is not returned as a default so we disable usecache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax make sure weights are tied in pytorch send pytorch model to the correct device send pytorch model to the correct 
device some params shouldn t be scattered by nn dataparallel so just remove them if they are present move input tensors to cuda o wrap model in nn dataparallel a candidate for testingutils returns a list of cuda memory allocations per gpu in mbs perdevicememory for id in rangetorch cuda devicecount with torch cuda deviceid perdevicememory appendtorch cuda memoryallocated 20 return perdevicememory needs a large model to see the difference config self modeltester getlargemodelconfig for modelclass in self allparallelizablemodelclasses torch cuda emptycache 1 single gpu memory load unload memory measurements retrieve initial memory usage can easily be 0 61 5gb if cudakernels have been preloaded by previous tests memoryatstart getcurrentgpumemoryuse put model on device 0 and take a memory snapshot model modelclassconfig model tocuda 0 memoryaftermodelload getcurrentgpumemoryuse the memory use on device 0 should be higher than it was initially self assertgreatermemoryaftermodelload0 memoryatstart0 del model gc collect torch cuda emptycache 2 mp test it s essential to recalibrate the usage before the next stage memoryatstart getcurrentgpumemoryuse spread model layers over multiple devices model modelclassconfig model parallelize memoryafterparallelization getcurrentgpumemoryuse assert that the memory use on all devices is higher than it was when loaded only on cpu for n in rangelenmodel devicemap keys self assertgreatermemoryafterparallelizationn memoryatstartn assert that the memory use of device 0 is lower than it was when the entire model was loaded on it self assertlessmemoryafterparallelization0 memoryaftermodelload0 assert that the memory use of device 1 is higher than it was when the entire model was loaded on device 0 and device 1 wasn t used at all self assertgreatermemoryafterparallelization1 memoryaftermodelload1 del model gc collect torch cuda emptycache requiretorchmultigpu def testmodelparallelequalresultsself if not self testmodelparallel return config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allparallelizablemodelclasses inputsdict self prepareforclassinputsdict modelclass def casttodevicedictionary device output for k v in dictionary items if isinstancev torch tensor outputk v todevice else outputk v return output model modelclassconfig output modelcasttodeviceinputsdict cpu model parallelize paralleloutput modelcasttodeviceinputsdict cuda 0 for value parallelvalue in zipoutput paralleloutput if isinstancevalue torch tensor self asserttruetorch allclosevalue parallelvalue tocpu atol1e7 elif isinstancevalue tuple list for value parallelvalue in zipvalue parallelvalue self asserttruetorch allclosevalue parallelvalue tocpu atol1e7 def checkdevicemapisrespectedself model devicemap for paramname param in model namedparameters find device in devicemap while lenparamname 0 and paramname not in devicemap paramname joinparamname split 1 if paramname not in devicemap raise valueerrordevice map is incomplete it does not contain any device for paramname paramdevice devicemapparamname if paramdevice in cpu disk self assertequalparam device torch devicemeta else self assertequalparam device torch deviceparamdevice requireaccelerate mark acceleratetests requiretorchgpu def testdiskoffloadbinself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses if modelclass nosplitmodules is none continue inputsdictclass self prepareforclassinputsdict modelclass model modelclassconfig eval model model 
totorchdevice torch manualseed0 baseoutput modelinputsdictclass modelsize computemodulesizesmodel with tempfile temporarydirectory as tmpdir model cpu savepretrainedtmpdir safeserializationfalse with self assertraisesvalueerror maxsize intself modelsplitpercents0 modelsize maxmemory 0 maxsize cpu maxsize this errors out cause it s missing an offload folder newmodel modelclass frompretrainedtmpdir devicemapauto maxmemorymaxmemory maxsize intself modelsplitpercents1 modelsize maxmemory 0 maxsize cpu maxsize newmodel modelclass frompretrained tmpdir devicemapauto maxmemorymaxmemory offloadfoldertmpdir self checkdevicemapisrespectednewmodel newmodel hfdevicemap torch manualseed0 newoutput newmodelinputsdictclass self asserttruetorch allclosebaseoutput0 newoutput0 atol1e5 requireaccelerate mark acceleratetests requiretorchgpu def testdiskoffloadsafetensorsself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses if modelclass nosplitmodules is none continue inputsdictclass self prepareforclassinputsdict modelclass model modelclassconfig eval model model totorchdevice torch manualseed0 baseoutput modelinputsdictclass modelsize computemodulesizesmodel with tempfile temporarydirectory as tmpdir model cpu savepretrainedtmpdir maxsize intself modelsplitpercents1 modelsize maxmemory 0 maxsize cpu maxsize this doesn t error out as it s in safetensors and doesn t need an offload folder newmodel modelclass frompretrainedtmpdir devicemapauto maxmemorymaxmemory self checkdevicemapisrespectednewmodel newmodel hfdevicemap torch manualseed0 newoutput newmodelinputsdictclass self asserttruetorch allclosebaseoutput0 newoutput0 atol1e5 requireaccelerate mark acceleratetests requiretorchgpu def testcpuoffloadself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses if modelclass nosplitmodules is none continue inputsdictclass self prepareforclassinputsdict modelclass model modelclassconfig eval model model totorchdevice torch manualseed0 baseoutput modelinputsdictclass modelsize computemodulesizesmodel we test several splits of sizes to make sure it works maxgpusizes intp modelsize for p in self modelsplitpercents1 with tempfile temporarydirectory as tmpdir model cpu savepretrainedtmpdir for maxsize in maxgpusizes maxmemory 0 maxsize cpu modelsize 2 newmodel modelclass frompretrainedtmpdir devicemapauto maxmemorymaxmemory making sure part of the model will actually end up offloaded self assertsetequalsetnewmodel hfdevicemap values 0 cpu self checkdevicemapisrespectednewmodel newmodel hfdevicemap torch manualseed0 newoutput newmodelinputsdictclass self asserttruetorch allclosebaseoutput0 newoutput0 atol1e5 requireaccelerate mark acceleratetests requiretorchmultigpu def testmodelparallelismself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses if modelclass nosplitmodules is none continue inputsdictclass self prepareforclassinputsdict modelclass model modelclassconfig eval model model totorchdevice torch manualseed0 baseoutput modelinputsdictclass modelsize computemodulesizesmodel we test several splits of sizes to make sure it works maxgpusizes intp modelsize for p in self modelsplitpercents1 with tempfile temporarydirectory as tmpdir model cpu savepretrainedtmpdir for maxsize in maxgpusizes maxmemory 0 maxsize 1 modelsize 2 cpu modelsize 2 newmodel modelclass frompretrainedtmpdir devicemapauto maxmemorymaxmemory making sure part of the model will 
            # ... actually end up offloaded
            self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1})

            self.check_device_map_is_respected(new_model, new_model.hf_device_map)

            torch.manual_seed(0)
            new_output = new_model(**inputs_dict_class)

            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if model_class.__name__ not in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
            ]:
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    def test_load_with_mismatched_shapes(self):
        if not self.test_mismatched_shapes:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
                continue

            with self.subTest(msg=f"Testing {model_class}"):
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model = model_class(config)
                    model.save_pretrained(tmp_dir)

                    # Fails when we don't set ignore_mismatched_sizes=True
                    with self.assertRaises(RuntimeError):
                        new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
                    with self.assertRaises(RuntimeError):
                        new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10)

                    logger = logging.get_logger("transformers.modeling_utils")

                    with CaptureLogger(logger) as cl:
                        new_model = AutoModelForSequenceClassification.from_pretrained(
                            tmp_dir, num_labels=42, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    new_model.to(torch_device)
                    inputs = self._prepare_for_class(inputs_dict, model_class)
                    logits = new_model(**inputs).logits
                    self.assertEqual(logits.shape[1], 42)

                    with CaptureLogger(logger) as cl:
                        new_model_without_prefix = AutoModel.from_pretrained(
                            tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    input_ids = ids_tensor((2, 8), 10)
                    new_model_without_prefix.to(torch_device)
                    if self.is_encoder_decoder:
                        new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
                    else:
                        new_model_without_prefix(input_ids)

    def test_model_is_small(self):
        # Just a consistency check to make sure we are not running tests on 80M parameter models.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            num_params = model.num_parameters()
            assert (
                num_params < 1000000
            ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max."

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_conversion(self):
        import torch

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True
                ).to(torch_device)

                for _, module in model.named_modules():
                    if "FlashAttention" in module.__class__.__name__:
                        return

                self.assertTrue(False, "FlashAttention2 modules not found in model")

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference(self):
        import torch

        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False
                )
                model.to(torch_device)

                dummy_input = inputs_dict[model.main_input_name][:1]
                if dummy_input.dtype in [torch.float32, torch.float16]:
                    dummy_input = dummy_input.to(torch.bfloat16)

                dummy_attention_mask = inputs_dict.get("attention_mask", None)

                if dummy_attention_mask is not None:
                    dummy_attention_mask = dummy_attention_mask[:1]
                    dummy_attention_mask[:, 1:] = 1
                    dummy_attention_mask[:, :1] = 0

                if model.config.is_encoder_decoder:
                    decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1]

                    outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                else:
                    outputs = model(dummy_input, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, output_hidden_states=True)

                logits = (
                    outputs.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs.decoder_hidden_states[-1]
                )
                logits_fa = (
                    outputs_fa.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs_fa.decoder_hidden_states[-1]
                )

                assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)

                if model.config.is_encoder_decoder:
                    other_inputs = {
                        "decoder_input_ids": decoder_input_ids,
                        "decoder_attention_mask": dummy_attention_mask,
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask

                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)
                else:
                    other_inputs = {
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask

                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)

                logits = (
                    outputs.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs.decoder_hidden_states[-1]
                )
                logits_fa = (
                    outputs_fa.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs_fa.decoder_hidden_states[-1]
                )

                assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)

                # check with inference + dropout
                model.train()
                _ = model_fa(dummy_input, **other_inputs)

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_padding_right(self):
        import torch

        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False
                )
                model.to(torch_device)

                dummy_input = inputs_dict[model.main_input_name][:1]
                if dummy_input.dtype in [torch.float32, torch.float16]:
                    dummy_input = dummy_input.to(torch.bfloat16)

                dummy_attention_mask = inputs_dict.get("attention_mask", None)

                if dummy_attention_mask is not None:
                    dummy_attention_mask = dummy_attention_mask[:1]
                    dummy_attention_mask[:, :-1] = 1
                    dummy_attention_mask[:, -1:] = 0

                if model.config.is_encoder_decoder:
                    decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1]

                    outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
                else:
                    outputs = model(dummy_input, output_hidden_states=True)
                    outputs_fa = model_fa(dummy_input, output_hidden_states=True)

                logits = (
                    outputs.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs.decoder_hidden_states[-1]
                )
                logits_fa = (
                    outputs_fa.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs_fa.decoder_hidden_states[-1]
                )

                assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)

                if model.config.is_encoder_decoder:
                    other_inputs = {
                        "decoder_input_ids": decoder_input_ids,
                        "decoder_attention_mask": dummy_attention_mask,
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask

                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)
                else:
                    other_inputs = {
                        "output_hidden_states": True,
                    }
                    if dummy_attention_mask is not None:
                        other_inputs["attention_mask"] = dummy_attention_mask

                    outputs = model(dummy_input, **other_inputs)
                    outputs_fa = model_fa(dummy_input, **other_inputs)

                logits = (
                    outputs.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs.decoder_hidden_states[-1]
                )
                logits_fa = (
                    outputs_fa.hidden_states[-1]
                    if not model.config.is_encoder_decoder
                    else outputs_fa.decoder_hidden_states[-1]
                )

                assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_generate_left_padding(self):
        import torch

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True
                ).to(torch_device)

                dummy_input = inputs_dict[model.main_input_name]
                if dummy_input.dtype in [torch.float32, torch.bfloat16]:
                    dummy_input = dummy_input.to(torch.float16)

                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
                # make sure we do left padding
                dummy_attention_mask[:, :-1] = 0
                dummy_attention_mask[:, -1:] = 1

                out = model.generate(
                    dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
                )

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True
                ).to(torch_device)

                out_fa = model.generate(
                    dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
                )

                self.assertTrue(torch.equal(out, out_fa))

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_generate_padding_right(self):
        import torch

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True
                ).to(torch_device)

                dummy_input = inputs_dict[model.main_input_name]
                if dummy_input.dtype in [torch.float32, torch.bfloat16]:
                    dummy_input = dummy_input.to(torch.float16)

                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
                # make sure we do right padding
                dummy_attention_mask[:, :-1] = 1
                dummy_attention_mask[:, -1:] = 0

                out = model.generate(
                    dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
                )

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True
                ).to(torch_device)

                out_fa = model.generate(
                    dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
                )

                self.assertTrue(torch.equal(out, out_fa))

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_generate_use_cache(self):
        import torch

        max_new_tokens = 30

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            dummy_input = inputs_dict[model_class.main_input_name]
            if dummy_input.dtype in [torch.float32, torch.bfloat16]:
                dummy_input = dummy_input.to(torch.float16)

            # make sure that all models have enough positions for generation
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1

            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True
                ).to(torch_device)

                # Just test that a large cache works as expected
                _ = model.generate(
                    dummy_input,
                    attention_mask=dummy_attention_mask,
                    max_new_tokens=max_new_tokens,
                    do_sample=False,
                    use_cache=True,
                )

    @require_flash_attn
    @require_torch_gpu
    @require_bitsandbytes
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_fp32_ln(self):
        import torch

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                dummy_input = inputs_dict[model.main_input_name]
                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))

                if model.config.is_encoder_decoder:
                    dummy_decoder_input_ids = inputs_dict["decoder_input_ids"]
                    dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"]

                model = model_class.from_pretrained(
                    tmpdirname,
                    torch_dtype=torch.float16,
                    use_flash_attention_2=True,
                    low_cpu_mem_usage=True,
                    load_in_4bit=True,
                )

                for _, param in model.named_parameters():
                    # upcast only layer norms
                    if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
                        param.data = param.data.to(torch.float32)

                if model.config.is_encoder_decoder:
                    _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids)
                    # with attention mask
                    _ = model(
                        dummy_input,
                        attention_mask=dummy_attention_mask,
                        decoder_input_ids=dummy_decoder_input_ids,
                        decoder_attention_mask=dummy_decoder_attention_mask,
                    )
                else:
                    _ = model(dummy_input)
                    # with attention mask
                    _ = model(dummy_input, attention_mask=dummy_attention_mask)

    @is_pt_tf_cross_test
    def test_tf_from_pt_safetensors(self):
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            tf_model_class_name = "TF" + model_class.__name__  # Add the "TF" at the beginning
            if not hasattr(transformers, tf_model_class_name):
                # transformers does not have this model in TF version yet
                return

            tf_model_class = getattr(transformers, tf_model_class_name)

            pt_model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(tmpdirname, safe_serialization=True)
                tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True)

                pt_model.save_pretrained(tmpdirname, safe_serialization=False)
                tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True)

                # Check models are equal
                for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights):
                    self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))

    @is_pt_flax_cross_test
    def test_flax_from_pt_safetensors(self):
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            flax_model_class_name = "Flax" + model_class.__name__  # Add the "Flax" at the beginning
            if not hasattr(transformers, flax_model_class_name):
                # transformers does not have this model in Flax version yet
                return

            flax_model_class = getattr(transformers, flax_model_class_name)

            pt_model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(tmpdirname, safe_serialization=True)
                flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True)

                pt_model.save_pretrained(tmpdirname, safe_serialization=False)
                flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True)

                # Check models are equal
                self.assertTrue(check_models_equal(flax_model_1, flax_model_2))

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_from_config(self):
        import torch

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            # TODO: to change it in the future with other relevant auto classes
            fa2_model = AutoModelForCausalLM.from_config(
                config, use_flash_attention_2=True, torch_dtype=torch.bfloat16
            ).to(torch_device)

            dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device)
            dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device)

            fa2_correctly_converted = False

            for _, module in fa2_model.named_modules():
                if "FlashAttention" in module.__class__.__name__:
                    fa2_correctly_converted = True
                    break

            self.assertTrue(fa2_correctly_converted)

            _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask)

            with tempfile.TemporaryDirectory() as tmpdirname:
                fa2_model.save_pretrained(tmpdirname)

                model_from_pretrained = AutoModelForCausalLM.from_pretrained(tmpdirname)

                self.assertFalse(getattr(model_from_pretrained.config, "_flash_attn_2_enabled", False))

                fa2_correctly_converted = False

                for _, module in model_from_pretrained.named_modules():
                    if "FlashAttention" in module.__class__.__name__:
                        fa2_correctly_converted = True
                        break

                self.assertFalse(fa2_correctly_converted)


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    # Creates a random int32 tensor of the shape within the vocab size
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
    # make sure that at least one token is attended to for each batch
    # we choose the 1st token so this property of `at least one being non-zero` still holds after applying causal mask
    attn_mask[:, 0] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()


# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
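# Shared testing utilities for PyTorch models. A model-specific test file typically defines a small
# "model tester" that builds a tiny config plus dummy inputs and mixes ModelTesterMixin (defined
# below) into its unittest.TestCase so the common tests run against that model. An illustrative
# sketch only, with hypothetical class and config names (the real per-model testers live in the
# model-specific test files):
#
#     class MyModelModelTester:
#         def __init__(self, parent):
#             self.parent = parent
#
#         def prepare_config_and_inputs_for_common(self):
#             config = MyModelConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2)
#             inputs_dict = {"input_ids": ids_tensor((2, 7), config.vocab_size)}
#             return config, inputs_dict
#
#     @require_torch
#     class MyModelModelTest(ModelTesterMixin, unittest.TestCase):
#         all_model_classes = (MyModelModel,) if is_torch_available() else ()
#
#         def setUp(self):
#             self.model_tester = MyModelModelTester(self)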
import collections import copy import gc import inspect import os import os.path import pickle import random import re import tempfile import warnings from collections import defaultdict from typing import Dict, List, Tuple import numpy as np from pytest import mark import transformers from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, PretrainedConfig, is_torch_available, logging, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, is_pt_flax_cross_test, is_pt_tf_cross_test, require_accelerate, require_bitsandbytes, require_flash_attn, require_safetensors, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, is_torch_fx_available, ) from transformers.utils.generic import ModelOutput if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding from transformers.pytorch_utils import id_tensor_storage if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp from tests.test_modeling_flax_utils import check_models_equal from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): value = ord(name[0].lower()) - 110 param.data.fill_(value) def _mock_all_init_weights(self): if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False self.apply(self._initialize_weights) self.tie_weights() @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () 
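    # The attributes above and below are overridden by each model's test class; the boolean flags
    # let a model opt out of individual common tests (torchscript, pruning, resizing, head masking, ...).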
all_generative_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False is_encoder_decoder = False has_attentions = True model_split_percents = [0.5, 0.7, 0.9] def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) 
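                    # The config file should always be saved; the generation config only for models that can generate.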
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_keep_in_fp32_modules(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._keep_in_fp32_modules is None: return model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16) for name, param in model.named_parameters(): if any(n in model_class._keep_in_fp32_modules for n in name.split(".")): self.assertTrue(param.dtype == torch.float32) else: self.assertTrue(param.dtype == torch.float16, name) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) state_dict_saved = safe_load_file(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) load_result = model.load_state_dict(state_dict_saved, strict=False) keys_to_ignore = set(model._keys_to_ignore_on_save) if hasattr(model, "_tied_weights_keys"): keys_to_ignore.update(set(model._tied_weights_keys)) self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( 
m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) def test_save_load_fast_init_from_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue class CopyClass(model_class): pass model_class_copy = CopyClass model_class_copy._keys_to_ignore_on_load_missing = [] model_class_copy._init_weights = _mock_init_weights model_class_copy.init_weights = _mock_all_init_weights model = base_class(config) state_dict = model.state_dict() random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = model_class_copy.from_pretrained(tmpdirname) model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item() else: max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_fast_init_to_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue class CopyClass(base_class): pass base_class_copy = CopyClass base_class_copy._keys_to_ignore_on_load_missing = [] base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] with tempfile.TemporaryDirectory() as tmpdirname: model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = base_class_copy.from_pretrained(tmpdirname) model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = torch.max( model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key] ).item() else: max_diff = torch.max( torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]) ).item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( 
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) elif model_class.__name__ in [*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] and self.has_attentions: expected_arg_names = ["pixel_values", "output_hidden_states", "output_attentions", "return_dict"] self.assertListEqual(arg_names, expected_arg_names) elif model_class.__name__ in [*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] and not self.has_attentions: expected_arg_names = ["pixel_values", "output_hidden_states", "return_dict"] self.assertListEqual(arg_names, expected_arg_names) else: expected_arg_names = [model.main_input_name] self.assertListEqual(arg_names[:1], expected_arg_names) def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if ( model_class.__name__ in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] or not model_class.supports_gradient_checkpointing ): continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() for p in model.parameters(): p.requires_grad_(True) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if 
model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): self.check_training_gradient_checkpointing() def test_training_gradient_checkpointing_use_reentrant(self): self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_training_gradient_checkpointing_use_reentrant_false(self): self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) 
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if model.config.is_encoder_decoder: model.config.use_cache = False main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor model(input_ids, bbox, image) traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) elif "bbox" in inputs: input_ids = inputs["input_ids"] bbox = inputs["bbox"] model(input_ids, bbox) traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) else: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") 
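            # Round-trip the traced module through torch.jit.save / torch.jit.load, then compare its
            # state dict and non-persistent buffers against the eager model.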
with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) self.clear_torch_jit_class_registry() def test_torch_fx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict) def test_torch_fx_output_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or 
model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) self.clear_torch_jit_class_registry() def test_headmasking(self): if not self.test_head_masking: return global_rng.seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() global_rng.seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() head_mask = torch.ones( self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device, ) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) output = sum(t.sum() for t in outputs[0]) output = output.sum() output.backward() multihead_outputs = head_mask.grad self.assertIsNotNone(multihead_outputs) self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) def check_attentions_validity(attentions): for t in attentions: self.assertLess( torch.sum(torch.isnan(t)), t.numel() / 4 ) attentions = [ t.masked_fill(torch.isnan(t), 0.0) for t in attentions ] self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) if len(attentions) > 2: self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) check_attentions_validity(outputs.cross_attentions) 
else: check_attentions_validity(outputs.attentions) def test_head_pruning(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = 
                model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)

            heads_to_prune = {0: [0], 1: [1, 2]}
            model.prune_heads(heads_to_prune)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)

            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2]})

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)
        output = outputs[0]

        if config.is_encoder_decoder:
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions.retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions.grad)
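                # Editor's note (hedged): intermediate activations are non-leaf tensors, so autograd does not
                # populate their .grad unless retain_grad() was called before backward(), which is why the
                # test calls it on the first hidden-state / attention tensors above. Minimal illustration:
                #
                #     x = torch.randn(2, 3, requires_grad=True)
                #     h = x * 2          # non-leaf: h.grad stays None after backward unless retained
                #     h.retain_grad()
                #     h.sum().backward()
                #     assert h.grad is not None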
self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def test_feed_forward_chunking(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model.eval() hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.manual_seed(0) config.chunk_size_feed_forward = 1 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) def test_resize_position_vector_embeddings(self): if not self.test_resize_position_embeddings: return ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() max_position_embeddings = config.max_position_embeddings if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() encoder_cloned_embeddings = encoder_model_embed.weight.clone() decoder_cloned_embeddings = decoder_model_embed.weight.clone() else: model_embed = model.get_position_embeddings() cloned_embeddings = model_embed.weight.clone() model.resize_position_embeddings(max_position_embeddings + 10) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10) if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_position_embeddings(max_position_embeddings - 5) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5) if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5) model(**self._prepare_for_class(inputs_dict, model_class)) models_equal = True if model.config.is_encoder_decoder: for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False else: for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: 
models_equal = False self.assertTrue(models_equal) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.vocab_size) self.assertTrue(model.config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) if model.get_output_embeddings() is None: continue model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_correct_missing_keys(self): if not self.test_missing_keys: return config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)} extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)}) if model._keys_to_ignore_on_load_missing: for key in model._keys_to_ignore_on_load_missing: extra_params.pop(key, None) if not extra_params: continue with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__) def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: 
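            # Editor's note (hedged): setting config.torchscript = True below makes the model clone the LM
            # head instead of tying it (TorchScript does not handle shared parameters), while config_tied
            # keeps the default tying, which conceptually amounts to sharing storage between the input and
            # output embeddings, e.g.:
            #
            #     model.get_output_embeddings().weight = model.get_input_embeddings().weight
            #
            # The assertion at the end only checks that resizing the tied model does not change the number
            # of parameter tensors, i.e. that the tie survives resize_token_embeddings.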
config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) model_tied.resize_token_embeddings(config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) @require_safetensors def test_can_use_safetensors(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model_tied = model_class(config) with tempfile.TemporaryDirectory() as d: try: model_tied.save_pretrained(d, safe_serialization=True) except Exception as e: raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}") model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) reloaded_state = model_reloaded.state_dict() for k, v in model_tied.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) self.assertEqual(infos["missing_keys"], []) ptrs = defaultdict(list) for k, v in model_tied.state_dict().items(): ptrs[v.data_ptr()].append(k) shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1} for _, shared_names in shared_ptrs.items(): reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names} self.assertEqual( len(reloaded_ptrs), 1, f"The shared pointers are incorrect, found different pointers for keys {shared_names}", ) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) self.assertEqual(infos["missing_keys"], []) def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] for key in tied_weight_keys: if not any(re.search(key, p) for group in tied_params for p in group): raise ValueError(f"{key} is not a tied weight key for {model_class}.") for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) def test_model_weights_reload_no_missing_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) with 
tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) placeholder_dict = {"tensor": torch.tensor([1, 2])} safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"}) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) prefix = f"{model_reloaded.base_model_prefix}." params = dict(model_reloaded.named_parameters()) params.update(dict(model_reloaded.named_buffers())) param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()} missing_keys = set(infos["missing_keys"]) extra_missing = missing_keys - param_names ptrs = collections.defaultdict(list) for name, tensor in model_reloaded.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] for group in tied_params: group = {k[len(prefix) :] if k.startswith(prefix) else k for k in group} if len(group - extra_missing) > 0: extra_missing = extra_missing - set(group) self.assertEqual( extra_missing, set(), f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. " f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} nonpersistent_buffers = { k[len(prefix) :] if k.startswith(prefix) else k for k in nonpersistent_buffers } missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set(model_reloaded._keys_to_ignore_on_load_missing) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def _make_attention_mask_non_null(self, inputs_dict): for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) inputs_dict[k] = attention_mask def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "FlaubertWithLMHeadModel", "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("GPT2"): tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k 
for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `attributes` should have the same length as `tf_outputs`", ) else: attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, tensor in pt_inputs_dict.items(): if isinstance(tensor, bool): tf_inputs_dict[key] = tensor elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif tensor.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } pt_model.to(torch_device) pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ if not hasattr(transformers, tf_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) tf_model = tf_model_class(config) pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False, ) tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys()) tf_input_keys.discard("head_mask") tf_input_keys.discard("cross_attn_head_mask") tf_input_keys.discard("decoder_head_mask") pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys} pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys} if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): pt_inputs_dict_with_labels = None tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") 
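                # Editor's note (hedged sketch): the same PT<->TF round trip can be done outside this test
                # harness with the public from_pretrained bridge, e.g. (path is illustrative):
                #
                #     pt_model.save_pretrained("/tmp/ckpt")
                #     tf_model = TFAutoModel.from_pretrained("/tmp/ckpt", from_pt=True)
                #
                # Here the checkpoint-file route is exercised explicitly: the PT weights were just loaded
                # into the TF model from pt_model.bin, and the TF weights are now written to tf_model.h5 so
                # they can be loaded back into the PyTorch model with load_tf2_checkpoint_in_pytorch_model.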
tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." 
) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) pt_model.tie_weights() pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, 
from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch_multi_gpu def test_model_parallelization(self): if not self.test_model_parallel: return def get_current_gpu_memory_use(): per_device_memory = [] for id in range(torch.cuda.device_count()): with torch.cuda.device(id): per_device_memory.append(torch.cuda.memory_allocated() >> 20) return per_device_memory config = self.model_tester.get_large_model_config() for model_class in self.all_parallelizable_model_classes: torch.cuda.empty_cache() memory_at_start = get_current_gpu_memory_use() model = model_class(config) model.to("cuda:0") memory_after_model_load = get_current_gpu_memory_use() self.assertGreater(memory_after_model_load[0], memory_at_start[0]) del model gc.collect() torch.cuda.empty_cache() memory_at_start = get_current_gpu_memory_use() model = model_class(config) model.parallelize() memory_after_parallelization = get_current_gpu_memory_use() for n in range(len(model.device_map.keys())): self.assertGreater(memory_after_parallelization[n], memory_at_start[n]) self.assertLess(memory_after_parallelization[0], memory_after_model_load[0]) self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1]) del model gc.collect() torch.cuda.empty_cache() @require_torch_multi_gpu def test_model_parallel_equal_results(self): if not self.test_model_parallel: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_parallelizable_model_classes: inputs_dict = self._prepare_for_class(inputs_dict, model_class) def cast_to_device(dictionary, device): output = {} for k, v in dictionary.items(): if isinstance(v, torch.Tensor): output[k] = v.to(device) else: output[k] = v return output model = model_class(config) output = 
model(**cast_to_device(inputs_dict, "cpu")) model.parallelize() parallel_output = model(**cast_to_device(inputs_dict, "cuda:0")) for value, parallel_value in zip(output, parallel_output): if isinstance(value, torch.Tensor): self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7)) elif isinstance(value, (Tuple, List)): for value_, parallel_value_ in zip(value, parallel_value): self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) else: self.assertEqual(param.device, torch.device(param_device)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_bin(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_safetensors(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is 
None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_multi_gpu def test_model_parallelism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if model_class.__name__ not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), ]: continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) with self.assertRaises(RuntimeError): new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(RuntimeError): new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = AutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) new_model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = AutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) new_model_without_prefix.to(torch_device) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_model_is_small(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) num_params = model.num_parameters() assert ( num_params < 1000000 ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max." @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_conversion(self): import torch config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True ).to(torch_device) for _, module in model.named_modules(): if "FlashAttention" in module.__class__.__name__: return self.assertTrue(False, "FlashAttention2 modules not found in model") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference(self): import torch for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False ) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] 
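                # Editor's note (hedged): the two assignments below mark the first position as padding
                # (attention_mask == 0) and the remaining positions as real tokens, i.e. a left-padded batch.
                # The Flash Attention 2 path typically unpads the sequences before the kernel call, and the
                # bfloat16 compute means exact equality with the eager path is not expected, so the test only
                # checks closeness within atol/rtol of 4e-2. Outside the test, the same code path would be
                # enabled via (checkpoint name illustrative):
                #
                #     model = AutoModelForCausalLM.from_pretrained(
                #         "some-checkpoint", torch_dtype=torch.bfloat16, use_flash_attention_2=True
                #     )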
dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False ) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] 
) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_left_padding(self): import torch for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True ).to(torch_device) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) dummy_attention_mask[:, :-1] = 0 dummy_attention_mask[:, -1:] = 1 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True ).to(torch_device) out_fa = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.equal(out, out_fa)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True ).to(torch_device) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True ).to(torch_device) out_fa = 
model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.equal(out, out_fa)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): import torch max_new_tokens = 30 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True, ).to(torch_device) _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @require_bitsandbytes @mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): import torch for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) if model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True, load_in_4bit=True, ) for _, param in model.named_parameters(): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if model.config.is_encoder_decoder: _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) _ = model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = model(dummy_input) _ = model(dummy_input, attention_mask=dummy_attention_mask) @is_pt_tf_cross_test def test_tf_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ if not hasattr(transformers, tf_model_class_name): return tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights): self.assertTrue(np.allclose(p1.numpy(), 
p2.numpy())) @is_pt_flax_cross_test def test_flax_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() flax_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, flax_model_class_name): return flax_model_class = getattr(transformers, flax_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) self.assertTrue(check_models_equal(flax_model_1, flax_model_2)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_from_config(self): import torch for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, _ = self.model_tester.prepare_config_and_inputs_for_common() fa2_model = AutoModelForCausalLM.from_config( config, use_flash_attention_2=True, torch_dtype=torch.bfloat16 ).to(torch_device) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) fa2_correctly_converted = False for _, module in fa2_model.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertTrue(fa2_correctly_converted) _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask) with tempfile.TemporaryDirectory() as tmpdirname: fa2_model.save_pretrained(tmpdirname) model_from_pretrained = AutoModelForCausalLM.from_pretrained(tmpdirname) self.assertFalse(getattr(model_from_pretrained.config, "_flash_attn_2_enabled", False)) fa2_correctly_converted = False for _, module in model_from_pretrained.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertFalse(fa2_correctly_converted) global_rng = random.Random() def ids_tensor(shape, vocab_size, rng=None, name=None): if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() def random_attention_mask(shape, rng=None, name=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None) attn_mask[:, 0] = 1 return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
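# Hedged illustration, not part of the original suite: the flash-attention inference tests above
# boil down to the comparison below. The same checkpoint is loaded once with and once without
# Flash Attention 2, a right-padded batch is run through both, and the hidden states of the
# non-padded positions must agree within a loose tolerance. "tiny-model-id" is a placeholder
# checkpoint name; a CUDA device, half-precision weights and an installed flash-attn package
# are assumed.
import torch

from transformers import AutoModelForCausalLM

model_id = "tiny-model-id"  # placeholder: any checkpoint whose architecture supports Flash Attention 2
model_fa = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, use_flash_attention_2=True
).to("cuda")
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, use_flash_attention_2=False
).to("cuda")

input_ids = torch.tensor([[11, 12, 13, 14, 15]], device="cuda")
attention_mask = torch.tensor([[1, 1, 1, 1, 0]], device="cuda")  # right padding: last position masked

with torch.no_grad():
    ref = model(input_ids, attention_mask=attention_mask, output_hidden_states=True).hidden_states[-1]
    fa = model_fa(input_ids, attention_mask=attention_mask, output_hidden_states=True).hidden_states[-1]

# padded positions carry no meaningful values under flash attention, so compare the unmasked prefix only
assert torch.allclose(fa[:, :-1], ref[:, :-1], atol=4e-2, rtol=4e-2)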
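# Hedged sketch of the padded-generation checks above: greedy decoding of one new token from the
# eager and Flash Attention 2 variants of the same checkpoint is expected to be identical even when
# the batch is left-padded. The model id and tensors are illustrative placeholders; a CUDA device
# and the flash-attn package are assumed.
import torch

from transformers import AutoModelForCausalLM

model_id = "tiny-model-id"  # placeholder
input_ids = torch.tensor([[11, 12, 13, 14], [15, 16, 17, 18]], device="cuda")
attention_mask = torch.tensor([[0, 1, 1, 1], [1, 1, 1, 1]], device="cuda")  # row 0 is left-padded

model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
out = model.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, do_sample=False)

model_fa = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, use_flash_attention_2=True
).to("cuda")
out_fa = model_fa.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, do_sample=False)

assert torch.equal(out, out_fa)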
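# Hedged sketch of the from_config behaviour checked above: instantiating a causal LM from a config
# with use_flash_attention_2=True swaps in FlashAttention attention modules, but the flag is not
# written to the saved config, so a plain reload falls back to the default attention implementation.
# The tiny LlamaConfig is only an illustrative FA2-capable architecture; a CUDA build of PyTorch and
# the flash-attn package are assumed.
import tempfile

import torch

from transformers import AutoModelForCausalLM, LlamaConfig

config = LlamaConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
fa2_model = AutoModelForCausalLM.from_config(config, use_flash_attention_2=True, torch_dtype=torch.bfloat16)
assert any("FlashAttention" in m.__class__.__name__ for m in fa2_model.modules())

with tempfile.TemporaryDirectory() as tmp:
    fa2_model.save_pretrained(tmp)
    reloaded = AutoModelForCausalLM.from_pretrained(tmp)
assert not any("FlashAttention" in m.__class__.__name__ for m in reloaded.modules())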
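# Hedged sketch of the safetensors cross-loading checks above: a PyTorch model saved with and
# without safe serialization should load into its Flax counterpart identically. BertConfig and
# the tiny sizes are arbitrary stand-ins; both torch and flax are assumed to be installed.
import tempfile

import numpy as np
from flax.traverse_util import flatten_dict

from transformers import BertConfig, BertModel, FlaxBertModel

config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
pt_model = BertModel(config)

with tempfile.TemporaryDirectory() as tmp:
    pt_model.save_pretrained(tmp, safe_serialization=True)
    fx_from_safetensors = FlaxBertModel.from_pretrained(tmp, from_pt=True)
    pt_model.save_pretrained(tmp, safe_serialization=False)
    fx_from_bin = FlaxBertModel.from_pretrained(tmp, from_pt=True)

flat_bin = flatten_dict(fx_from_bin.params)
for key, value in flatten_dict(fx_from_safetensors.params).items():
    np.testing.assert_allclose(np.array(value), np.array(flat_bin[key]))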
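# Minimal usage sketch, added for illustration, of the random-input helpers defined just above:
# fabricate a small batch of token ids, an attention mask and float features for a toy test.
# The sizes are arbitrary.
import torch

batch_size, seq_length, vocab_size = 2, 7, 99
input_ids = ids_tensor((batch_size, seq_length), vocab_size)        # random int64 ids on torch_device
attention_mask = random_attention_mask((batch_size, seq_length))    # 0/1 mask, first position forced to 1
features = floats_tensor((batch_size, seq_length, 8), scale=0.5)    # random floats in [0, 0.5)

assert input_ids.shape == (batch_size, seq_length)
assert int(attention_mask[:, 0].min()) == 1
assert features.dtype == torch.float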
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license creates a random int32 tensor of the shape within the vocab size if rng is none rng random random totaldims 1 for dim in shape totaldims dim values for in rangetotaldims values appendrng randint0 vocabsize 1 output np arrayvalues dtypejnp int32 reshapeshape return output def floatstensorshape scale1 0 rngnone namenone make sure that at least one token is attended to for each batch function extracts relevant parameters into flatten dict from model params appends batch normalization statistics if present if both parameters and batch normalization statistics are present if batchstats in params extract only parameters for the specified head prefix if specified and add batch statistics if fromheadprefix is not none extractedparams flattendictunfreezeparamsparamsfromheadprefix extractedparams updateflattendictparamsbatchstatsfromheadprefix else extractedparams flattendictunfreezeparamsparams extractedparams updateflattendictparamsbatchstats only parameters are present else if fromheadprefix is not none extractedparams flattendictunfreezeparamsfromheadprefix else extractedparams flattendictunfreezeparams return extractedparams requireflax class flaxmodeltestermixin modeltester none allmodelclasses testmismatchedshapes true isencoderdecoder false testheadmasking false hasattentions true def prepareforclassself inputsdict modelclass inputsdict copy deepcopyinputsdict hack for now until we have automodel classes if formultiplechoice in modelclass name inputsdict k jnp broadcasttov none v shape0 self modeltester numchoices v shape1 if isinstancev jnp ndarray np ndarray and k indicesprngkey else v for k v in inputsdict items return inputsdict def assertalmostequalsself a np ndarray b np ndarray tol float diff np absa b max self assertlessequaldiff tol fdifference between torch and flax is diff tol def testmodeloutputsequivalenceself config inputsdict self modeltester prepareconfigandinputsforcommon def checkequivalencemodel tupleinputs dictinputs additionalkwargs tupleoutput modeltupleinputs returndictfalse additionalkwargs dictoutput modeldictinputs returndicttrue additionalkwargs totuple def recursivechecktupleobject dictobject if isinstancetupleobject list tuple for tupleiterablevalue dictiterablevalue in ziptupleobject dictobject recursivechecktupleiterablevalue dictiterablevalue elif tupleobject is none return else self assertalmostequalsjnp nantonumtupleobject jnp nantonumdictobject 1e5 recursivechecktupleoutput dictoutput for modelclass in self allmodelclasses model modelclassconfig tupleinputs self prepareforclassinputsdict modelclass dictinputs self prepareforclassinputsdict modelclass checkequivalencemodel tupleinputs dictinputs tupleinputs self prepareforclassinputsdict modelclass dictinputs self prepareforclassinputsdict modelclass checkequivalencemodel tupleinputs dictinputs outputhiddenstates true copied from tests testmodelingcommon modeltestermixin checkptflaxoutputs def checkptflaxoutputsself fxoutputs ptoutputs modelclass tol1e5 nameoutputs attributesnone self 
assertequaltypename str if attributes is not none self assertequaltypeattributes tuple fname the argument attributes should be a tuple allow modeloutput e g clipoutput has textmodeloutput and visionmodeloutput if isinstancefxoutputs modeloutput self asserttrue isinstanceptoutputs modeloutput fname ptoutputs should an instance of modeloutput when fxoutputs is fxkeys tuplek for k v in fxoutputs items if v is not none ptkeys tuplek for k v in ptoutputs items if v is not none self assertequalfxkeys ptkeys fname output keys differ between flax and pytorch convert to the case of tuple appending each key to the current string name attributes tuplefname k for k in fxkeys self checkptflaxoutputs fxoutputs totuple ptoutputs totuple modelclass toltol namename attributesattributes allow list e g transfoxlmodeloutput mems is a list of tensors elif typefxoutputs in tuple list self assertequal typefxoutputs typeptoutputs fname output types differ between flax and pytorch self assertequal lenfxoutputs lenptoutputs fname output lengths differ between flax and pytorch if attributes is not none case 1 each output has assigned name e g a tuple form of a modeloutput self assertequal lenattributes lenfxoutputs fname the tuple attributes should have the same length as fxoutputs else case 2 each output has no assigned name e g hidden states of each layer add an index to name attributes tuplefnameidx for idx in rangelenfxoutputs for fxoutput ptoutput attr in zipfxoutputs ptoutputs attributes self checkptflaxoutputsfxoutput ptoutput modelclass toltol nameattr elif isinstancefxoutputs jnp ndarray self asserttrue isinstanceptoutputs torch tensor fname ptoutputs should a tensor when fxoutputs is using np asarray gives valueerror assignment destination is readonly at the line fxoutputsfxnans 0 fxoutputs np arrayfxoutputs ptoutputs ptoutputs detach tocpu numpy self assertequal fxoutputs shape ptoutputs shape fname output shapes differ between flax and pytorch deal with numpy s scalars to make replacing nan values by 0 work if np isscalarfxoutputs fxoutputs np arrayfxoutputs ptoutputs np arrayptoutputs fxnans np isnanfxoutputs ptnans np isnanptoutputs ptoutputsfxnans 0 fxoutputsfxnans 0 ptoutputsptnans 0 fxoutputsptnans 0 maxdiff np amaxnp absfxoutputs ptoutputs self assertlessequal maxdiff tol fname difference between pytorch and flax is maxdiff tol else raise valueerror fxoutputs should be an instance of modeloutput a tuple or an instance of jnp ndarray got f typefxoutputs instead isptflaxcrosstest def testequivalencepttoflaxself it might be better to put this inside the for loop below because we modify the config there but logically it is fine config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses with self subtestmodelclass name output all for aggressive testing config outputhiddenstates true config outputattentions self hasattentions prepare inputs preparedinputsdict self prepareforclassinputsdict modelclass ptinputs k torch tensorv tolist devicetorchdevice for k v in preparedinputsdict items load corresponding pytorch class ptmodelclassname modelclass name4 skip the flax at the beginning ptmodelclass getattrtransformers ptmodelclassname ptmodel ptmodelclassconfig eval flax models don t use the usecache option and cache is not returned as a default so we disable usecache here for pytorch model ptmodel config usecache false fxmodel modelclassconfig dtypejnp float32 fxstate convertpytorchstatedicttoflaxptmodel statedict fxmodel fxmodel params fxstate send pytorch 
model to the correct device ptmodel totorchdevice with torch nograd ptoutputs ptmodelptinputs fxoutputs fxmodelpreparedinputsdict fxkeys tuplek for k v in fxoutputs items if v is not none ptkeys tuplek for k v in ptoutputs items if v is not none self assertequalfxkeys ptkeys self checkptflaxoutputsfxoutputs ptoutputs modelclass with tempfile temporarydirectory as tmpdirname ptmodel savepretrainedtmpdirname fxmodelloaded modelclass frompretrainedtmpdirname frompttrue fxoutputsloaded fxmodelloadedpreparedinputsdict fxkeys tuplek for k v in fxoutputsloaded items if v is not none ptkeys tuplek for k v in ptoutputs items if v is not none self assertequalfxkeys ptkeys self checkptflaxoutputsfxoutputsloaded ptoutputs modelclass isptflaxcrosstest def testequivalenceflaxtoptself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses with self subtestmodelclass name output all for aggressive testing config outputhiddenstates true config outputattentions self hasattentions prepare inputs preparedinputsdict self prepareforclassinputsdict modelclass ptinputs k torch tensorv tolist devicetorchdevice for k v in preparedinputsdict items load corresponding pytorch class ptmodelclassname modelclass name4 skip the flax at the beginning ptmodelclass getattrtransformers ptmodelclassname ptmodel ptmodelclassconfig eval flax models don t use the usecache option and cache is not returned as a default so we disable usecache here for pytorch model ptmodel config usecache false fxmodel modelclassconfig dtypejnp float32 ptmodel loadflaxweightsinpytorchmodelptmodel fxmodel params make sure weights are tied in pytorch ptmodel tieweights send pytorch model to the correct device ptmodel totorchdevice with torch nograd ptoutputs ptmodelptinputs fxoutputs fxmodelpreparedinputsdict fxkeys tuplek for k v in fxoutputs items if v is not none ptkeys tuplek for k v in ptoutputs items if v is not none self assertequalfxkeys ptkeys self checkptflaxoutputsfxoutputs ptoutputs modelclass with tempfile temporarydirectory as tmpdirname fxmodel savepretrainedtmpdirname ptmodelloaded ptmodelclass frompretrainedtmpdirname fromflaxtrue send pytorch model to the correct device ptmodelloaded totorchdevice ptmodelloaded eval with torch nograd ptoutputsloaded ptmodelloadedptinputs fxkeys tuplek for k v in fxoutputs items if v is not none ptkeys tuplek for k v in ptoutputsloaded items if v is not none self assertequalfxkeys ptkeys self checkptflaxoutputsfxoutputs ptoutputsloaded modelclass def testfrompretrainedsavepretrainedself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses with self subtestmodelclass name model modelclassconfig preparedinputsdict self prepareforclassinputsdict modelclass outputs modelpreparedinputsdict totuple verify that normal savepretrained works as expected with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname the config file and the generation config file if it can generate should be saved self asserttrueos path existsos path jointmpdirname configname self assertequal model cangenerate os path existsos path jointmpdirname generationconfigname modelloaded modelclass frompretrainedtmpdirname outputsloaded modelloadedpreparedinputsdict totuple for outputloaded output in zipoutputsloaded outputs self assertalmostequalsoutputloaded output 1e3 verify that savepretrained for distributed training with paramsparams works as expected with tempfile temporarydirectory as tmpdirname model 
savepretrainedtmpdirname paramsmodel params modelloaded modelclass frompretrainedtmpdirname outputsloaded modelloadedpreparedinputsdict totuple for outputloaded output in zipoutputsloaded outputs self assertalmostequalsoutputloaded output 1e3 def testsaveloadfrombaseself config self modeltester prepareconfigandinputsforcommon baseclass flaxmodelmappingconfig class for modelclass in self allmodelclasses if modelclass baseclass continue model baseclassconfig baseparams getparamsmodel params check that all base model weights are loaded correctly with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname headmodel modelclass frompretrainedtmpdirname baseparamfromhead getparamsheadmodel params fromheadprefixheadmodel basemodelprefix for key in baseparamfromhead keys maxdiff baseparamskey baseparamfromheadkey sum item self assertlessequalmaxdiff 1e3 msgfkey not identical def testsaveloadtobaseself config self modeltester prepareconfigandinputsforcommon baseclass flaxmodelmappingconfig class for modelclass in self allmodelclasses if modelclass baseclass continue model modelclassconfig baseparamsfromhead getparamsmodel params fromheadprefixmodel basemodelprefix check that all base model weights are loaded correctly with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname basemodel baseclass frompretrainedtmpdirname baseparams getparamsbasemodel params for key in baseparamsfromhead keys maxdiff baseparamskey baseparamsfromheadkey sum item self assertlessequalmaxdiff 1e3 msgfkey not identical isptflaxcrosstest def testsaveloadfrombaseptself config self modeltester prepareconfigandinputsforcommon baseclass flaxmodelmappingconfig class for modelclass in self allmodelclasses if modelclass baseclass continue model baseclassconfig baseparams getparamsmodel params convert flax model to pytorch model ptmodelclass getattrtransformers baseclass name4 skip the flax at the beginning ptmodel ptmodelclassconfig eval ptmodel loadflaxweightsinpytorchmodelptmodel model params check that all base model weights are loaded correctly with tempfile temporarydirectory as tmpdirname save pt model ptmodel savepretrainedtmpdirname headmodel modelclass frompretrainedtmpdirname frompttrue baseparamfromhead getparamsheadmodel params fromheadprefixheadmodel basemodelprefix for key in baseparamfromhead keys maxdiff baseparamskey baseparamfromheadkey sum item self assertlessequalmaxdiff 1e3 msgfkey not identical isptflaxcrosstest def testsaveloadtobaseptself config self modeltester prepareconfigandinputsforcommon baseclass flaxmodelmappingconfig class for modelclass in self allmodelclasses if modelclass baseclass continue model modelclassconfig baseparamsfromhead getparamsmodel params fromheadprefixmodel basemodelprefix convert flax model to pytorch model ptmodelclass getattrtransformers modelclass name4 skip the flax at the beginning ptmodel ptmodelclassconfig eval ptmodel loadflaxweightsinpytorchmodelptmodel model params check that all base model weights are loaded correctly with tempfile temporarydirectory as tmpdirname ptmodel savepretrainedtmpdirname basemodel baseclass frompretrainedtmpdirname frompttrue baseparams getparamsbasemodel params for key in baseparamsfromhead keys maxdiff baseparamskey baseparamsfromheadkey sum item self assertlessequalmaxdiff 1e3 msgfkey not identical isptflaxcrosstest def testsaveloadbf16tobaseptself config self modeltester prepareconfigandinputsforcommon baseclass flaxmodelmappingconfig class for modelclass in self allmodelclasses if modelclass baseclass 
continue model modelclassconfig model params model tobf16model params baseparamsfromhead getparamsmodel params fromheadprefixmodel basemodelprefix convert flax model to pytorch model ptmodelclass getattrtransformers modelclass name4 skip the flax at the beginning ptmodel ptmodelclassconfig eval ptmodel loadflaxweightsinpytorchmodelptmodel model params check that all base model weights are loaded correctly with tempfile temporarydirectory as tmpdirname ptmodel savepretrainedtmpdirname basemodel baseclass frompretrainedtmpdirname frompttrue baseparams getparamsbasemodel params for key in baseparamsfromhead keys maxdiff baseparamskey baseparamsfromheadkey sum item self assertlessequalmaxdiff 1e3 msgfkey not identical def testjitcompilationself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses with self subtestmodelclass name preparedinputsdict self prepareforclassinputsdict modelclass model modelclassconfig jax jit def modeljittedinputids attentionmasknone kwargs return modelinputidsinputids attentionmaskattentionmask kwargs with self subtestjit enabled jittedoutputs modeljittedpreparedinputsdict totuple with self subtestjit disabled with jax disablejit outputs modeljittedpreparedinputsdict totuple self assertequallenoutputs lenjittedoutputs for jittedoutput output in zipjittedoutputs outputs self assertequaljittedoutput shape output shape def testforwardsignatureself config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig signature inspect signaturemodel call signature parameters is an ordereddict so argnames order is deterministic argnames signature parameters keys if model config isencoderdecoder expectedargnames inputids attentionmask decoderinputids decoderattentionmask self assertlistequalargnames lenexpectedargnames expectedargnames else expectedargnames inputids attentionmask self assertlistequalargnames 2 expectedargnames def testnamingconventionself for modelclass in self allmodelclasses modelclassname modelclass name moduleclassname modelclassname 5 module if modelclassname5 model else modelclassname module bertmodelingflaxmodule importmodelclass module fromlistmoduleclassname modulecls getattrbertmodelingflaxmodule moduleclassname self assertisnotnonemodulecls def testhiddenstatesoutputself def checkhiddenstatesoutputinputsdict config modelclass model modelclassconfig outputs modelself prepareforclassinputsdict modelclass hiddenstates outputs encoderhiddenstates if config isencoderdecoder else outputs hiddenstates expectednumlayers getattr self modeltester expectednumhiddenlayers self modeltester numhiddenlayers 1 self assertequallenhiddenstates expectednumlayers if hasattrself modeltester encoderseqlength seqlength self modeltester encoderseqlength else seqlength self modeltester seqlength self assertlistequal listhiddenstates0 shape2 seqlength self modeltester hiddensize if config isencoderdecoder hiddenstates outputs decoderhiddenstates self assertisinstancehiddenstates list tuple self assertequallenhiddenstates expectednumlayers seqlen getattrself modeltester seqlength none decoderseqlength getattrself modeltester decoderseqlength seqlen self assertlistequal listhiddenstates0 shape2 decoderseqlength self modeltester hiddensize config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses inputsdictoutputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass check that outputhiddenstates also work using 
config del inputsdictoutputhiddenstates config outputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass def testattentionoutputsself if not self hasattentions self skiptestreasonmodel does not output attentions config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true seqlength getattrself modeltester seqlength none decoderseqlength getattrself modeltester decoderseqlength seqlength encoderseqlength getattrself modeltester encoderseqlength seqlength decoderkeylength getattrself modeltester decoderkeylength decoderseqlength encoderkeylength getattrself modeltester keylength encoderseqlength for modelclass in self allmodelclasses inputsdictoutputattentions true inputsdictoutputhiddenstates false model modelclassconfig outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers check that outputattentions also work using config del inputsdictoutputattentions config outputattentions true model modelclassconfig outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers self assertlistequal listattentions0 shape3 self modeltester numattentionheads encoderseqlength encoderkeylength outlen lenoutputs if self isencoderdecoder correctoutlen 5 question answering model returns startlogits and endlogits if modelclass in getvaluesflaxmodelforquestionansweringmapping correctoutlen 1 startlogits and endlogits instead of only 1 output self assertequaloutlen correctoutlen decoder attentions decoderattentions outputs decoderattentions self assertisinstancedecoderattentions list tuple self assertequallendecoderattentions self modeltester numhiddenlayers self assertlistequal listdecoderattentions0 shape3 self modeltester numattentionheads decoderseqlength decoderkeylength cross attentions crossattentions outputs crossattentions self assertisinstancecrossattentions list tuple self assertequallencrossattentions self modeltester numhiddenlayers self assertlistequal listcrossattentions0 shape3 self modeltester numattentionheads decoderseqlength encoderkeylength check attention is always last and order is fine inputsdictoutputattentions true inputsdictoutputhiddenstates true model modelclassconfig outputs modelself prepareforclassinputsdict modelclass if hasattrself modeltester numhiddenstatestypes addedhiddenstates self modeltester numhiddenstatestypes elif self isencoderdecoder addedhiddenstates 2 else addedhiddenstates 1 self assertequaloutlen addedhiddenstates lenoutputs selfattentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenselfattentions self modeltester numhiddenlayers self assertlistequal listselfattentions0 shape3 self modeltester numattentionheads encoderseqlength encoderkeylength def testloadwithmismatchedshapesself if not self testmismatchedshapes return config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses if modelclass not in getvaluesflaxmodelforsequenceclassificationmapping continue with self subtestmsgftesting modelclass with tempfile temporarydirectory as tmpdir model modelclassconfig model savepretrainedtmpdir fails when we don t set ignoremismatchedsizestrue with self assertraisesvalueerror newmodel flaxautomodelforsequenceclassification 
frompretrainedtmpdir numlabels42 with self assertraisesvalueerror newmodelwithoutprefix flaxautomodel frompretrainedtmpdir vocabsize10 logger logging getloggertransformers modelingflaxutils with captureloggerlogger as cl newmodel flaxautomodelforsequenceclassification frompretrained tmpdir numlabels42 ignoremismatchedsizestrue self assertinthe shapes did not match cl out logits newmodelinputsdictlogits self assertequallogits shape1 42 with captureloggerlogger as cl newmodelwithoutprefix flaxautomodel frompretrained tmpdir vocabsize10 ignoremismatchedsizestrue self assertinthe shapes did not match cl out inputids idstensor2 8 10 if self isencoderdecoder newmodelwithoutprefixinputids decoderinputidsinputids else newmodelwithoutprefixinputids def testdefaultparamsdtypeself config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses check if all params are still in float32 when dtype of computation is halfprecision model modelclassconfig dtypejnp float16 types jax treeutil treemaplambda x x dtype model params types flattendicttypes for name type in types items self assertequalstype jnp float32 msgfparam name is not initialized in fp32 def testtobf16self config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig cast all params to bf16 params model tobf16model params types flattendictjax treeutil treemaplambda x x dtype params test if all params are in bf16 for name type in types items self assertequaltype jnp bfloat16 msgfparam name is not in bf16 test masking flatparams flattendictparams key random choicelistflatparams keys choose a random param mask path path key for path in flatparams don t cast the key mask unflattendictmask params model tobf16model params mask types flattendictjax treeutil treemaplambda x x dtype params test if all params are in bf16 except key for name type in types items if name key self assertequaltype jnp float32 msgfparam name should be in fp32 else self assertequaltype jnp bfloat16 msgfparam name is not in bf16 def testtofp16self config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig cast all params to fp16 params model tofp16model params types flattendictjax treeutil treemaplambda x x dtype params test if all params are in fp16 for name type in types items self assertequaltype jnp float16 msgfparam name is not in fp16 test masking flatparams flattendictparams key random choicelistflatparams keys choose a random param mask path path key for path in flatparams don t cast the key mask unflattendictmask params model tofp16model params mask types flattendictjax treeutil treemaplambda x x dtype params test if all params are in fp16 except key for name type in types items if name key self assertequaltype jnp float32 msgfparam name should be in fp32 else self assertequaltype jnp float16 msgfparam name is not in fp16 def testtofp32self config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig cast all params to fp16 and back to fp32 params model tofp16model params params model tofp32params test if all params are in fp32 types flattendictjax treeutil treemaplambda x x dtype params for name type in types items self assertequaltype jnp float32 msgfparam name is not in fp32 test masking flatparams flattendictparams key random choicelistflatparams keys choose a random param mask path path key for path in flatparams don t cast the key mask unflattendictmask cast to 
fp16 and back to fp32 with mask params model tofp16model params params model tofp32params mask test if all params are in fp32 except key types flattendictjax treeutil treemaplambda x x dtype params for name type in types items if name key self assertequaltype jnp float16 msgfparam name should be in fp16 else self assertequaltype jnp float32 msgfparam name is not in fp32 def testsaveloadinfp16self config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig convert weights to fp16 and save params model tofp16model params with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname paramsparams load the weights again and check if they are still in fp16 model modelclass frompretrainedtmpdirname types flattendictjax treeutil treemaplambda x x dtype model params for name type in types items self assertequaltype jnp float16 msgfparam name is not in fp16 def testsaveloadinbf16self config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig convert weights to bf16 and save params model tobf16model params with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname paramsparams load the weights again and check if they are still in fp16 model modelclass frompretrainedtmpdirname types flattendictjax treeutil treemaplambda x x dtype model params for name type in types items self assertequaltype jnp bfloat16 msgfparam name is not in bf16 def testmodelmaininputnameself for modelclass in self allmodelclasses modelsignature inspect signaturegetattrmodelclass call the main input is the name of the argument after self observedmaininputname listmodelsignature parameters keys1 self assertequalmodelclass maininputname observedmaininputname def testheadmaskingself if not self testheadmasking return config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true def preparelayerheadmaski attentionheads numhiddenlayers if i 0 return np concatenatenp zeros1 dtypejnp int32 np onesattentionheads 1 dtypejnp int32 if i numhiddenlayers 1 return np concatenatenp zerosattentionheads 1 dtypejnp int32 np ones1 dtypejnp int32 return np onesattentionheads dtypejnp int32 for modelclass in self allmodelclasses model modelclassconfig inputsdictoutputattentions true inputsdictoutputhiddenstates false inputs self prepareforclassinputsdict modelclass copy prepare head mask inputsheadmask np stack preparelayerheadmaski config numattentionheads config numhiddenlayers for i in rangeconfig numhiddenlayers outputs modelinputs def checkattentionsvalidityattentions remove nan for t in attentions check we don t have more than 25 nans arbitrary self assertlessnp isnant sum t size 4 attentions np wherenp isnant 0 0 t for t in attentions self assertalmostequalattentions0 0 sum 0 0 self assertnotequalattentions0 1 sum 0 0 if lenattentions 2 encoderdecodere models have only 2 layers in each modules self assertnotequalattentions1 0 sum 0 0 self assertalmostequalattentions1 2 sum 0 0 self assertnotequalattentions1 1 sum 0 0 if model config isencoderdecoder raise notimplementederrorthe test has not been implemented for encoderdecoder models yet else checkattentionsvalidityoutputs attentions def testnoautomaticinitself config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true for modelclass in self allmodelclasses model modelclassconfig doinitfalse check that accesing parmas raises an valueerror when doinit is false with self 
assertraisesvalueerror params model params check if we params can be properly initialized when calling initweights params model initweightsmodel key model inputshape self assertisinstanceparams frozendict check if all required parmas are initialized keys setflattendictunfreezeparams keys self asserttrueallk in keys for k in model requiredparams check if the shapes match flatparams flattendictunfreezeparams for k v in flattendictunfreezemodel paramsshapetree items self assertequal v shape flatparamsk shape shapes of do not match expecting got formatk v shape flatparamsk shape check that setting params raises an valueerror when doinit is false with self assertraisesvalueerror model params params check if we can do a forward pass inputsdictoutputhiddenstates true inputs self prepareforclassinputsdict modelclass copy modelinputs paramsparams def testfrompretrainedwithnoautomaticinitself config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true def assertallparamsinitialisedmodel params check if all required parmas are loaded keys setflattendictunfreezeparams keys self asserttrueallk in keys for k in model requiredparams check if the shapes match flatparams flattendictunfreezeparams for k v in flattendictunfreezemodel paramsshapetree items self assertequal v shape flatparamsk shape shapes of do not match expecting got formatk v shape flatparamsk shape for modelclass in self allmodelclasses init the model model modelclassconfig save the model in the temporary directory load the saved model with doinitfalse with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname model params modelclass frompretrainedtmpdirname doinitfalse check that accesing parmas raises an valueerror when doinit is false with self assertraisesvalueerror params model params check if all required parmas are loaded assertallparamsinitialisedmodel params check that setting params raises an valueerror when doinit is false with self assertraisesvalueerror model params params check if initweights initializes missing keys from frompretrained flatparams flattendictunfreezeparams randomkey random choicelistflatparams keys flatparams poprandomkey params freezeunflattendictflatparams with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname paramsparams model params modelclass frompretrainedtmpdirname doinitfalse params model initweightsmodel key model inputshape paramsparams check if all required parmas are loaded assertallparamsinitialisedmodel params def testcheckpointshardingfromhubself model flaxbertmodel frompretrainedarthurzflaxtinyrandombertsharded the model above is the same as the model below just a sharded version refmodel flaxbertmodel frompretrainedhfinternaltestingtinybertflaxonly for p1 p2 in zipflattendictmodel params values flattendictrefmodel params values assert np allclosenp arrayp1 np arrayp2 def testcheckpointshardinglocalself model flaxbertmodel frompretrainedhfinternaltestingtinybertflaxonly with tempfile temporarydirectory as tmpdir we use the same folder for various sizes to make sure a new save erases the old checkpoint for maxsize in 150kb 150kib 200kb 200kib model savepretrainedtmpdir maxshardsizemaxsize get each shard file and its size shardtosize for shard in os listdirtmpdir if shard endswith msgpack shardfile os path jointmpdir shard shardtosizeshardfile os path getsizeshardfile indexfile os path jointmpdir flaxweightsindexname check there is an index but no regular weight file self asserttrueos path isfileindexfile self assertfalseos 
path isfileos path jointmpdir flaxweightsname check a file is bigger than maxsize only when it has a single weight for shardfile size in shardtosize items if maxsize endswithkib maxsizeint intmaxsize 3 210 else maxsizeint intmaxsize 2 103 note pickle adds some junk so the weight of the file can end up being slightly bigger than the size asked for since we count parameters if size maxsizeint 50000 with openshardfile rb as statef statefile frombytesflaxbertmodel statef read self assertequallenstatefile 1 check the index and the shard files found match with openindexfile r encodingutf8 as f index json loadsf read allshards setindexweightmap values shardsfound f for f in os listdirtmpdir if f endswith msgpack self assertsetequalallshards shardsfound finally check the model can be reloaded newmodel flaxbertmodel frompretrainedtmpdir for p1 p2 in zipflattendictmodel params values flattendictnewmodel params values self asserttruenp allclosenp arrayp1 np arrayp2 isptflaxcrosstest def testfromshardedptself model flaxbertmodel frompretrainedhfinternaltestingtinyrandombertsharded frompttrue refmodel flaxbertmodel frompretrainedhfinternaltestingtinyrandombertfxonly for key refval in flattendictrefmodel params items val flattendictmodel paramskey assert np allclosenp arrayval np arrayrefval def testgradientcheckpointingself config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses prepare inputs preparedinputsdict self prepareforclassinputsdict modelclass model modelclassconfig rematmodel modelclassconfig try rematmodel enablegradientcheckpointing except notimplementederror continue outputs modelpreparedinputsdict rematoutputs rematmodelpreparedinputsdict ensure that the dicts of outputs contain the same keys self assertequaloutputs keys rematoutputs keys outputs outputs totuple rematoutputs rematoutputs totuple ensure that the outputs remain precisely equal for output rematoutput in zipoutputs rematoutputs self asserttrueoutput rematoutput all 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license assumed parallelism 8 creates a random int32 tensor of the shape within the vocab size creates a random float32 tensor make sure that at least one token is attended to for each batch function extracts relevant parameters into flatten dict from model params appends batch normalization statistics if present if both parameters and batch normalization statistics are present extract only parameters for the specified head prefix if specified and add batch statistics only parameters are present hack for now until we have automodel classes copied from tests test_modeling_common modeltestermixin check_pt_flax_outputs args model_class the class of the model that is currently testing for example etc currently unused but it could make debugging easier and faster names a string or a list of strings these specify what fx_outputs pt_outputs represent in the model outputs currently unused but in the future we could use this information to make the error message clearer by giving the name s of the output tensor s 
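# Hedged sketch, added for illustration, of the PyTorch-to-Flax equivalence check exercised by the
# Flax tester in this file: convert a PyTorch state dict into Flax parameters and require matching
# outputs within the 1e-5 tolerance the suite uses. The Bert classes and the tiny config are
# arbitrary stand-ins.
import numpy as np
import torch
import jax.numpy as jnp

from transformers import BertConfig, BertModel, FlaxBertModel
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
pt_model = BertModel(config).eval()
fx_model = FlaxBertModel(config, dtype=jnp.float32)
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

input_ids = np.array([[0, 4, 7, 2]], dtype=np.int32)
with torch.no_grad():
    pt_out = pt_model(torch.tensor(input_ids, dtype=torch.long)).last_hidden_state.numpy()
fx_out = np.array(fx_model(input_ids).last_hidden_state)

assert np.amax(np.abs(pt_out - fx_out)) < 1e-5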
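# Hedged sketch of the dtype-casting checks in the Flax tester: to_fp16 / to_bf16 / to_fp32 cast the
# parameter pytree, optionally under a boolean mask that leaves selected leaves untouched. The tiny
# BertConfig is only an illustration.
import jax
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict

from transformers import BertConfig, FlaxBertModel

config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
model = FlaxBertModel(config)

params = model.to_fp16(model.params)
dtypes = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
assert all(dtype == jnp.float16 for dtype in dtypes.values())

# keep one leaf in fp32 by masking it out of the cast
flat = flatten_dict(model.params)
key = next(iter(flat))
mask = unflatten_dict({path: path != key for path in flat})
params = model.to_fp16(model.params, mask)
dtypes = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
assert dtypes[key] == jnp.float32
assert all(dtype == jnp.float16 for path, dtype in dtypes.items() if path != key)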
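# Hedged sketch of the mismatched-shape loading behaviour the Flax tester checks: reloading a
# sequence-classification checkpoint with a different num_labels fails unless
# ignore_mismatched_sizes=True is passed, in which case the mismatched head is re-initialised.
# The tiny BertConfig is illustrative only.
import tempfile

from transformers import BertConfig, FlaxAutoModelForSequenceClassification, FlaxBertForSequenceClassification

config = BertConfig(
    hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, num_labels=3
)
model = FlaxBertForSequenceClassification(config)

with tempfile.TemporaryDirectory() as tmp:
    model.save_pretrained(tmp)
    try:
        FlaxAutoModelForSequenceClassification.from_pretrained(tmp, num_labels=42)
        raise AssertionError("expected a size-mismatch ValueError")
    except ValueError:
        pass
    reloaded = FlaxAutoModelForSequenceClassification.from_pretrained(
        tmp, num_labels=42, ignore_mismatched_sizes=True
    )
    assert reloaded.config.num_labels == 42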
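# Hedged sketch of the local checkpoint-sharding behaviour the Flax tester exercises: saving with a
# small max_shard_size produces several .msgpack shard files plus an index, and from_pretrained
# stitches them back together. The shard size and tiny config are illustrative.
import os
import tempfile

import numpy as np
from flax.traverse_util import flatten_dict

from transformers import BertConfig, FlaxBertModel
from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME

config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
model = FlaxBertModel(config)

with tempfile.TemporaryDirectory() as tmp:
    model.save_pretrained(tmp, max_shard_size="50kB")
    shards = [f for f in os.listdir(tmp) if f.endswith(".msgpack")]
    assert len(shards) > 1                                             # the weights were split
    assert os.path.isfile(os.path.join(tmp, FLAX_WEIGHTS_INDEX_NAME))  # and indexed
    reloaded = FlaxBertModel.from_pretrained(tmp)

flat_reloaded = flatten_dict(reloaded.params)
for key, value in flatten_dict(model.params).items():
    np.testing.assert_allclose(np.array(value), np.array(flat_reloaded[key]))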
import copy import inspect import json import random import tempfile from typing import List, Tuple import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, is_pt_flax_cross_test, require_flax, torch_device from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): if "batch_stats" in params: if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) else: if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) and k != "indices_prng_key" else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, 
**additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." 
) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): config.output_hidden_states = True config.output_attentions = self.has_attentions prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): config.output_hidden_states = True config.output_attentions = self.has_attentions prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) pt_model.tie_weights() pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with 
self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) pt_model_class = getattr(transformers, base_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = 
FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = 
getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) mask = {path: path != key for path in flat_params} mask = unflatten_dict(mask) params = 
model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) mask = {path: path != key for path in flat_params} mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.to_fp16(model.params) params = model.to_fp32(params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) mask = {path: path != key for path in flat_params} mask = unflatten_dict(mask) params = model.to_fp16(model.params) params = model.to_fp32(params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): for t in attentions: self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) with self.assertRaises(ValueError): params = model.params params = model.init_weights(model.key, model.input_shape) self.assertIsInstance(params, FrozenDict) keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) with self.assertRaises(ValueError): model.params = params inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) with self.assertRaises(ValueError): params = model.params _assert_all_params_initialised(model, params) with self.assertRaises(ValueError): model.params = params flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = 
remat_model(**prepared_inputs_dict) self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all())
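The to_bf16/to_fp16/to_fp32 tests above rely on the optional parameter mask those methods accept. A minimal sketch of that masking pattern, assuming a tiny illustrative BertConfig rather than one of the checkpoints used by the tests:

# Sketch only: keep one chosen parameter in fp32 and cast the rest to bf16,
# mirroring the mask construction used in test_to_bf16 / test_to_fp16 above.
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict

from transformers import BertConfig, FlaxBertModel

config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = FlaxBertModel(config)

flat_params = flatten_dict(model.params)   # params are a (possibly frozen) nested dict
keep_key = next(iter(flat_params))         # parameter that should stay in fp32
mask = unflatten_dict({path: path != keep_key for path in flat_params})

bf16_params = model.to_bf16(model.params, mask)
dtypes = {name: p.dtype for name, p in flatten_dict(bf16_params).items()}
assert dtypes[keep_key] == jnp.float32
assert all(d == jnp.bfloat16 for name, d in dtypes.items() if name != keep_key)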
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Assumed parallelism: 8.
Reset repo; push to hub via save_pretrained. Reset repo; push to hub via save_pretrained.
No msgpack file, only a model.safetensors; check we have a model.safetensors file; check models are equal.
This test checks that we can load safetensors from a checkpoint that only has those on the Hub; can load from the Flax-formatted checkpoint. This test checks that we can load safetensors from a checkpoint that only has those on the Hub.
This test checks that we can load safetensors from a checkpoint that only has those on the Hub, saved in the PT format; can load from the PyTorch-formatted checkpoint. This test checks that we can load safetensors from a checkpoint that only has those on the Hub, saved in the PT format; can load from the PyTorch-formatted checkpoint.
This test checks that we cannot load safetensors from a checkpoint that only has safetensors saved in the PT format if torch isn't installed; it verifies that a correct error message is shown when loading from a PT safetensors (PyTorch shouldn't be installed for this to work correctly); cannot load from the PyTorch-formatted checkpoint without PyTorch installed. This test checks that we cannot load safetensors from a checkpoint that only has safetensors saved in the PT format if torch isn't installed; it verifies that a correct error message is shown when loading from a PT safetensors (PyTorch shouldn't be installed for this to work correctly); cannot load from the PyTorch-formatted checkpoint without PyTorch installed.
This test checks that we'll first download msgpack weights before safetensors; the safetensors file on that repo is a PT safetensors and therefore cannot be loaded without PyTorch. This test checks that we'll first download msgpack weights before safetensors; the safetensors file on that repo is a PT safetensors and therefore cannot be loaded without PyTorch.
This should not raise even if there are two types of sharded weights. This should not raise even if there are two types of sharded weights. This should discard the safetensors weights in favor of the msgpack sharded weights.
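The save/load behaviour described above follows the usual save_pretrained/from_pretrained round trip. A minimal sketch, assuming the safetensors package is installed and using the tiny test checkpoint named above:

# Sketch of the safetensors round trip the tests below verify:
# safe_serialization=True writes model.safetensors instead of flax_model.msgpack.
import os
import tempfile

from transformers import FlaxBertModel
from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME

model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir, safe_serialization=True)
    assert os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))
    assert not os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))

    reloaded = FlaxBertModel.from_pretrained(tmp_dir)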
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo, snapshot_download from requests.exceptions import HTTPError from transformers import BertConfig, BertModel, is_flax_available, is_torch_available from transformers.testing_utils import ( TOKEN, USER, is_pt_flax_cross_test, is_staging_test, require_flax, require_safetensors, require_torch, ) from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("test-model-flax", token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") delete_repo(token=self._token, repo_id="test-model-flax") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("valid_org/test-model-flax-org", token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - 
flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) @require_safetensors def test_safetensors_save_and_load(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_flax @require_torch @is_pt_flax_cross_test def test_safetensors_save_and_load_pt_to_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_safetensors def test_safetensors_load_from_hub(self): flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-only") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_local(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-only", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-safetensors-only", cache_dir=tmp) safetensors_model = 
FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_torch @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_hub_from_safetensors_pt(self): flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-msgpack") safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_torch @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_local_from_safetensors_pt(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-msgpack", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) safetensors_model = FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_hub_from_safetensors_pt_without_torch_installed(self): if is_torch_available(): return with self.assertRaises(ModuleNotFoundError): _ = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") @require_safetensors def test_safetensors_load_from_local_from_safetensors_pt_without_torch_installed(self): if is_torch_available(): return with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) with self.assertRaises(ModuleNotFoundError): _ = FlaxBertModel.from_pretrained(location) @require_safetensors def test_safetensors_load_from_hub_msgpack_before_safetensors(self): FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") @require_safetensors def test_safetensors_load_from_local_msgpack_before_safetensors(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) FlaxBertModel.from_pretrained(location) @require_safetensors def test_safetensors_flax_from_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_safetensors @require_torch def test_safetensors_flax_from_torch(self): hub_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(hub_model, new_model)) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_local(self): with tempfile.TemporaryDirectory() as tmp_dir: path = snapshot_download( "hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded", cache_dir=tmp_dir ) FlaxBertModel.from_pretrained(path) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_hub(self): FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded")
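As a usage note for the subfolder tests above, loading simply forwards the subfolder argument to from_pretrained. A minimal sketch using the same "bert" subfolder name; the tiny BertConfig values are illustrative:

# Sketch only: save into a subfolder, then load it back by passing subfolder=.
import os
import tempfile

from transformers import BertConfig, FlaxBertModel

config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = FlaxBertModel(config)

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(os.path.join(tmp_dir, "bert"))
    # Loading the parent directory alone fails; the subfolder argument locates the weights.
    reloaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder="bert")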
codingutf8 2019 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license restrict tensorflow to only allocate x gb of memory on the gpus virtual devices must be set before gpus have been initialized when we have enough ctc models for an autoclass we should use their mapping instead of name checks the config file and the generation config file if it can generate should be saved make sure that returned config is jsonifiable which is required by keras make sure it also accepts a normal config check that we have one of three possible outputs none tuple of tensors or a tensor signature parameters is an ordereddict so argnames order is deterministic tf2onnx issue page https github comonnxtensorflowonnxissues2172 todo undo skip once a fix is done in tf2onnx this condition is required since modelingtfclip py has 3 classes whose names end with mainlayer t5mainlayer needs an embedtokens parameter when called without the inputsembeds parameter take the same values than in tft5modeltester for this shared layer make sure we don t have nans don t copy this method to model specific test file todo remove this method once the issues are all fixed make sure no sequence has all zeros as attention mask for k in attentionmask encoderattentionmask decoderattentionmask if k in inputsdict attentionmask inputsdictk make sure no all 0s attention masks to avoid failure at this moment put 1 at the beginning of sequences to make it still work when combining causal attention masks todo remove this line once a fix regarding large negative values for attention mask is done attentionmask tf concat tf oneslikeattentionmask 1 dtypeattentionmask dtype attentionmask 1 axis1 here we make the first sequence with all 0s as attention mask currently this will fail for tfwav2vec2model this is caused by the different large negative values like 1e4 1e9 1e30 and inf for attention mask across modelsframeworks todo enable this block once the large negative values thing is cleaned up see https github comhuggingfacetransformersissues14859 attentionmask tf concat tf zeroslikeattentionmask 1 dtypetf int32 tf castattentionmask1 dtypetf int32 axis0 inputsdictk attentionmask don t copy this method to model specific test file todo remove this method once the issues are all fixed def postprocessingtoignoretestcasesself tfoutputs ptoutputs modelclass tfgpt2 has pastkeyvalues as a tensor while gpt2 has it as a tuple create new outputs from the remaining fields check the outputs from pytorch and tensorflow models are close enough checks are done in a recursive way args modelclass the class of the model that is currently testing for example tfbertmodel tfbertformaskedlm tfbertforsequenceclassification etc mainly used for providing more informative error messages name str the name of the output for example output hiddenstates output attentions etc attributes tuplestr the names of the output s element if the output is a tuplelist with each element being a named field in the output allow modeloutput e g clipoutput has textmodeloutput and visionmodeloutput don t copy this block to model specific test 
file todo remove this method and this line after issues are fixed convert to the case of tuple appending each key to the current string names allow list e g transfoxlmodeloutput mems is a list of tensors case 1 each output has assigned name e g a tuple form of a modeloutput case 2 each output has no assigned name e g hidden states of each layer add an index to names deal with numpy s scalars to make replacing nan values by 0 work other general float inputs send pytorch inputs to the correct device send pytorch model to the correct device check predictions on first output logitshiddenstates are close enough given lowlevel computational differences tf models returned loss is usually a tensor rather than a scalar see hfcomputeloss it uses tf keras losses reduction none change it here to a scalar to match pytorch models loss output all for aggressive testing make sure no sequence has all zeros as attention mask otherwise some tests fail due to the inconsistency of the usage 1e4 1e9 1e30 inf todo use a uniform value for all models make sure all tests pass without this processing and remove it not all models accept labels in the forward pass yet for some models e g base models there is no label returned set the input dict to none to avoid check outputs twice for the same input dicts check we can load pt model in tf and viceversa with model model functions original test check without labels check with labels check we can load pt model in tf and viceversa with checkpoint model functions original test check without labels check with labels prepare our model these are maximally general inputs for the model with multiple none dimensions hopefully this will catch any conditionals that fail for flexible shapes compile extended model check that output attentions can also be changed via the config check attention is always last and order is fine prepare headmask remove nan not all models accept labels in the forward pass yet todo joao after the embeddings refactor is complete rework this test so as to rely exclusively on tf keras layers embedding builds the embeddings layer build the embeddings reshape the embeddings check that the resized embeddings size matches the desired size check that weights remain the same after resizing todo joao this test is not slow but it s tagged as such to keep track of failures on the scheduled ci runs while passing push ci fix the underlying issues and remove the tag create a model with resized expended embeddings fetch the output for an input exclusively made of new members of the vocabulary save and load the model check that the output for the restored model is the same tf embeddings layers don t raise an exception when an index is out of bounds on gpu so we manually raise it this test should only fail on gpu for models where we haven t added the safety check iterate over all generative models if bos token id is not defined model needs inputids numreturnsequences 1 models with nontext inputs won t work here numreturnsequences 1 generating multiple sequences when no beam search generation is not allowed as it would always generate the same sequences numreturnsequences 1 sample check bad words tokens language generation create list of 1seq bad token and list of 2seq of bad tokens only count generated tokens iterate over all generative models if bos token id is not defined model needs inputids numreturnsequences 1 numreturnsequences 1 generating more sequences than having beams leads is not possible numreturnsequences 1 sample numreturnsequences 1 greedy check bad words 
tokens language generation create list of 1seq bad token and list of 2seq of bad tokens only count generated tokens iterate over all generative models the number of elements in the loss should be the same as the number of elements in the label test that model correctly compute the loss with kwargs test that model correctly compute the loss when we mask some positions test that model correctly compute the loss with a dict test that model correctly compute the loss with a tuple get keys that were added with the prepareforclass function create a dictionary holding the location of the tensors in the tuple initialize a list with their default values update the values and convert to a tuple send to model test that model correctly compute the loss with kwargs we also remove returnloss as this is covered by the trainstep when using fit build the model so we can get some constant weights and check outputs run eagerly to save some expensive compilation times make sure the model fits without crashing regardless of where we pass the labels the next tests only make sense for models with separate inputs and labels and do not make sense for models that don t clearly distinguish between the two e g clip we reinitialize the model here even though our learning rate was zero because batchnorm updates weights by means other than gradient descent after testing that the model accepts all int inputs confirm that its dummies are int32 also confirm that the inputsignature uses int32 we want to test only encoderdecoder models we check the state of decoderattentions and crossattentions just from the last step fails when we don t set ignoremismatchedsizestrue although tf models always have a prefix pointing to mainlayer we still add this without prefix test to keep a consistency between tf and pt tests the main input is the name of the argument after self assert we discarded the unwanted extra column but kept everything else assert we discarded the unwanted extra column but kept everything else make sure there are no pad tokens in prompt which may trigger unwanted behavior due to numerical instability let s fail the test only if there are more than 10 of input sequences give different outputs between xla and nonxla versions if there are less than 10 examples let s be strict and not allow any difference fix config for models with additional sequencelength limiting settings xlnet will raise an exception when trying to set maxpositionembeddings basic quick test for generatecompatible classes that confirms that xlagenerated tokens are the same as their non xla counterparts either the model supports xla generation and passes the inner test or it raises an appropriate exception slow and challenging version of testxlageneratefast for contrastive search contrastive search directly manipulates the model cache and other outputs and this test ensures that they are in a valid format that is also supported by xla either the model supports xla generation and passes the inner test or it raises an appropriate exception slow and challenging version of testxlageneratefast this test asks for several long sequences using beam search with and without xla the two outputs should match and a failure in this test indicates that the model may need further analysis if it is to be used for xla generation either the model supports xla generation and passes the inner test or it raises an appropriate exception special tokens cannot be bad tokens create random bad tokens that are not special tokens for all bad word tokens for all slices in batch 
for all word idx if tokens match creates a random int32 tensor of the shape within the vocab size if rng is none rng random random totaldims 1 for dim in shape totaldims dim values for in rangetotaldims values appendrng randint0 vocabsize 1 output tf constantvalues shapeshape dtypedtype if dtype is not none else tf int32 return output def randomattentionmaskshape rngnone namenone dtypenone attnmask idstensorshape vocabsize2 rngnone namenone dtypedtype make sure that at least one token is attended to for each batch attnmask tf concatattnmask 1 tf oneslikeattnmask 1 dtypedtype axis1 return attnmask def floatstensorshape scale1 0 rngnone namenone dtypenone coding utf 8 2019 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license noqa f401 restrict tensorflow to only allocate x gb of memory on the gpus virtual devices must be set before gpus have been initialized when we have enough ctc models for an autoclass we should use their mapping instead of name checks the config file and the generation config file if it can generate should be saved make sure that returned config is jsonifiable which is required by keras make sure it also accepts a normal config build model check that we have one of three possible outputs none tuple of tensors or a tensor signature parameters is an ordereddict so arg_names order is deterministic tf2onnx issue page https github com onnx tensorflow onnx issues 2172 todo undo skip once a fix is done in tf2onnx this condition is required since modeling_tf_clip py has 3 classes whose names end with mainlayer t5mainlayer needs an embed_tokens parameter when called without the inputs_embeds parameter take the same values than in tft5modeltester for this shared layer make sure we don t have nans don t copy this method to model specific test file todo remove this method once the issues are all fixed make sure no sequence has all zeros as attention mask make sure no all 0s attention masks to avoid failure at this moment put 1 at the beginning of sequences to make it still work when combining causal attention masks todo remove this line once a fix regarding large negative values for attention mask is done here we make the first sequence with all 0s as attention mask currently this will fail for tfwav2vec2model this is caused by the different large negative values like 1e 4 1e 9 1e 30 and inf for attention mask across models frameworks todo enable this block once the large negative values thing is cleaned up see https github com huggingface transformers issues 14859 attention_mask tf concat tf zeros_like attention_mask 1 dtype tf int32 tf cast attention_mask 1 dtype tf int32 axis 0 don t copy this method to model specific test file todo remove this method once the issues are all fixed for temporarily ignoring some failed test cases issues to be fixed tfgpt2 has past_key_values as a tensor while gpt2 has it as a tuple create new outputs from the remaining fields check the outputs from pytorch and tensorflow models are close enough checks are done in a recursive way args model_class the class of the model that is currently 
"""
Shared test machinery for the TensorFlow models (e.g. TFBertModel, TFBertForMaskedLM,
TFBertForSequenceClassification), mainly used to run the same battery of checks against
every architecture and to produce informative error messages when they fail.

`TFModelTesterMixin` provides, among others:

- save/load round trips: `save_pretrained`/`from_pretrained`, Keras `get_config`/`from_config`,
  SavedModel export and Keras `.h5` serialization of the main layers;
- PT/TF cross tests: outputs (and losses, when the TF `call` signature accepts labels) must match
  the PyTorch counterpart, both through in-memory weight conversion and through checkpoints;
  attention masks are forced to contain at least one attended token per sequence, and TF losses
  are reduced to a scalar to match the PyTorch convention;
- output structure checks: attentions and cross-attentions, hidden states, head masking,
  tuple vs. dict output equivalence, serving outputs, `inputs_embeds`, numpy array inputs;
- embedding resizing: `resize_token_embeddings` (which mutates the config), save/load after
  resizing, and the out-of-bounds index check that only triggers on GPU;
- generation: greedy, sampling and beam search for all generative classes, dict-style generate
  outputs, bad-words filtering (counting only newly generated tokens), head masking during
  generation, and XLA generation (fast, contrastive and slow beam-search variants) in which
  XLA and non-XLA outputs are compared;
- training-related checks: loss computation with keyword, dict and tuple inputs, `keras.fit`
  with labels passed either inside the input dict or separately, int32/int64 input support,
  dataset conversion via `prepare_tf_dataset`, and loading checkpoints with mismatched shapes.

The helpers at the end of the module (`ids_tensor`, `random_attention_mask`, `floats_tensor`)
create random integer tensors within the vocabulary, attention masks in which at least the last
position is attended to, and random float32 tensors.
"""
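# Illustrative only: a model-specific test module typically pairs this mixin with a small
# "model tester" object that builds a tiny config and random inputs. The names below
# (TFMyModel, MyModelConfig, TFMyModelModelTester, TFMyModelTest) are hypothetical
# placeholders, not classes defined in this file or in transformers:
#
#     class TFMyModelModelTester:
#         def __init__(self, parent, batch_size=13, seq_length=7, vocab_size=99):
#             self.parent = parent
#             self.batch_size = batch_size
#             self.seq_length = seq_length
#             self.vocab_size = vocab_size
#
#         def prepare_config_and_inputs_for_common(self):
#             config = MyModelConfig(vocab_size=self.vocab_size, hidden_size=32)
#             inputs_dict = {
#                 "input_ids": ids_tensor((self.batch_size, self.seq_length), self.vocab_size),
#                 "attention_mask": random_attention_mask((self.batch_size, self.seq_length)),
#             }
#             return config, inputs_dict
#
#     @require_tf
#     class TFMyModelTest(TFModelTesterMixin, unittest.TestCase):
#         all_model_classes = (TFMyModel,)
#         all_generative_model_classes = ()
#
#         def setUp(self):
#             self.model_tester = TFMyModelModelTester(self)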
from __future__ import annotations import copy import inspect import json import os import random import tempfile import unittest from importlib import import_module from math import isnan from typing import List, Tuple from datasets import Dataset from transformers import is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, require_tf2onnx, slow, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput logger = logging.get_logger(__name__) if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFAutoModel, TFAutoModelForSequenceClassification, TFSharedEmbeddings, ) from transformers.generation import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, ) tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: print(e) if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key: setattr(configs_no_init, key, 0.0) return configs_no_init @require_tf class TFModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ 
*get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING), ] and "labels" in dict(inspect.signature(model_class.call).parameters): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) elif model_class.__name__.endswith("ForCTC"): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def test_initialization(self): pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) after_outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) def test_save_load_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) model_config = model.get_config() json.dumps(model_config) new_model = model_class.from_config(model.get_config()) _ = model_class.from_config(model.config) _ = new_model(self._prepare_for_class(inputs_dict, model_class)) new_model.set_weights(model.get_weights()) after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) @slow def test_saved_model_creation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = False config.output_attentions = False if hasattr(config, "use_cache"): config.use_cache = False model_class = self.all_model_classes[0] class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model(class_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") 
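# `save_pretrained(..., saved_model=True)` writes the SavedModel export under
# `<save_directory>/saved_model/1`; the assertion below only checks that this directory was
# created and does not reload the export.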
self.assertTrue(os.path.exists(saved_model_dir)) def test_prepare_serving_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(inputs) serving_outputs = model.serving_output(outputs) for k, v in serving_outputs.items(): if isinstance(v, tuple): self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v)) elif v is not None: self.assertIsInstance(v, tf.Tensor) else: self.assertIsNone(v) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else []) expected_arg_names.extend( ["head_mask", "decoder_head_mask"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) expected_arg_names.extend( ["cross_attn_head_mask", "encoder_outputs"] if "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_onnx_compliancy(self): if not self.test_onnx: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() INTERNAL_OPS = [ "Assert", "AssignVariableOp", "EmptyTensorList", "ReadVariableOp", "ResourceGather", "TruncatedNormal", "VarHandleOp", "VarIsInitializedOp", ] onnx_ops = [] with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f: onnx_opsets = json.load(f)["opsets"] for i in range(1, self.onnx_min_opset + 1): onnx_ops.extend(onnx_opsets[str(i)]) for model_class in self.all_model_classes: model_op_names = set() with tf.Graph().as_default() as g: model = model_class(config) model.build() for op in g.get_operations(): model_op_names.add(op.node_def.op) model_op_names = sorted(model_op_names) incompatible_ops = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(op) self.assertEqual(len(incompatible_ops), 0, incompatible_ops) @unittest.skip("`tf2onnx` broke with TF 2.13") @require_tf2onnx @slow def test_onnx_runtime_optimize(self): if not self.test_onnx: return import onnxruntime import tf2onnx config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) model.build() onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset) onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and tf.keras.layers.Layer 
in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: if "T5" in main_layer_class.__name__: shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = tf.keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = tf.keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, tf.keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) def assert_outputs_same(self, after_outputs, outputs): if isinstance(after_outputs, tf.Tensor): out_1 = after_outputs.numpy() elif isinstance(after_outputs, dict): out_1 = after_outputs[list(after_outputs.keys())[0]].numpy() else: out_1 = after_outputs[0].numpy() out_2 = outputs[0].numpy() self.assertEqual(out_1.shape, out_2.shape) out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def _make_attention_mask_non_null(self, inputs_dict): for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) inputs_dict[k] = attention_mask def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "TFFlaubertWithLMHeadModel", "TFFunnelForPreTraining", "TFElectraForPreTraining", "TFXLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("TFGPT2"): tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") 
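# Recurse over the tuple view of both outputs, carrying dotted attribute names (e.g.
# "outputs.logits") so that any mismatch is reported for the exact field that differs.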
attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." ) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } pt_model.to(torch_device) pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) 
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()): tf_inputs_dict_with_labels = None tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) @slow def test_compile_tf_model(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) functional_inputs = { key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) hidden_states = outputs_dict[0] functional_model = tf.keras.Model(inputs=functional_inputs, outputs=hidden_states) model_out = functional_model.predict(model.dummy_inputs) self.assertTrue(model_out is not None) with tempfile.TemporaryDirectory() as tmpdirname: functional_model.save(tmpdirname) def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) outputs_keywords = model(**inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(min(out_len % 2, out_len % 5), 0) decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) def test_headmasking(self): if not self.test_head_masking: return random.Random().seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() random.Random().seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) def prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return tf.concat( (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0 ) elif i == num_hidden_layers - 1: return tf.concat( (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0 ) else: return tf.ones(attention_heads, dtype=tf.float32) head_mask = tf.stack( [ prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ], 0, ) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) def check_attentions_validity(attentions): for t in attentions: self.assertLess( (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy() ) attentions = [ tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions ] self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0) if len(attentions) > 2: 
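# Encoder-decoder testers only use two layers per module, so there is no intermediate layer to
# inspect; the additional checks on attentions[1] below therefore only run when more than two
# attention tensors are returned.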
self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0) self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) if "cross_attn_head_mask" in arg_names: check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) if model.config.is_encoder_decoder: encoder_hidden_states = outputs.encoder_hidden_states decoder_hidden_states = outputs.decoder_hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(encoder_hidden_states), expected_num_layers) self.assertListEqual( list(encoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(decoder_hidden_states), expected_num_layers) self.assertListEqual( list(decoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) else: hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() text_in_text_out_models = ( get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING) + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING) + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) ) speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING) for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer) legacy_text_in_text_out = model.get_lm_head() is not None if model_class in text_in_text_out_models or legacy_text_in_text_out: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, tf.keras.layers.Layer) bias = model.get_bias() if bias is not None: self.assertIsInstance(bias, dict) for _, v in bias.items(): self.assertIsInstance(v, tf.Variable) elif model_class in speech_in_text_out_models: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, tf.keras.layers.Layer) bias = model.get_bias() self.assertIsNone(bias) else: out_embeddings = model.get_output_embeddings() assert out_embeddings is None bias = model.get_bias() self.assertIsNone(bias) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first, second = ( model(self._prepare_for_class(inputs_dict, 
model_class), training=False)[0], model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], ) out_1 = first.numpy() out_2 = second.numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) if "labels" in inspect.signature(model.call).parameters.keys(): tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: 
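# Encoder-decoder models need embedded inputs for both sides, so the encoder and decoder token
# ids popped above are each passed through the input embedding layer.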
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) model(inputs) def test_numpy_arrays_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def prepare_numpy_arrays(inputs_dict): inputs_np_dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(v): inputs_np_dict[k] = v.numpy() else: inputs_np_dict[k] = np.array(k) return inputs_np_dict for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) inputs_np = prepare_numpy_arrays(inputs) output_for_dict_input = model(inputs_np) output_for_kw_input = model(**inputs_np) self.assert_outputs_same(output_for_dict_input, output_for_kw_input) def test_valid_input_signature_and_dummies(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) call_args = inspect.signature(model.call).parameters for key in model.input_signature: self.assertIn(key, call_args) for key in model.dummy_inputs: self.assertIn(key, call_args) def test_resize_token_embeddings(self): if not self.test_resize_embeddings: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if isinstance(embedding_layer, tf.keras.layers.Embedding): model.build() return embedding_layer.embeddings else: return model._get_word_embedding_weight(embedding_layer) for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: model = model_class(config=copy.deepcopy(config)) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_bias = model.get_bias() old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_bias = model.get_bias() new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_bias is not None and new_bias is not None: for old_weight, new_weight in zip(old_bias.values(), new_bias.values()): self.assertEqual(new_weight.shape[-1], assert_size) models_equal = True for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1]) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) @slow def test_save_load_after_resize_token_embeddings(self): if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: new_tokens_size = 10 old_total_size = config.vocab_size new_total_size = old_total_size + new_tokens_size model = model_class(config=copy.deepcopy(config)) model.build() model.resize_token_embeddings(new_total_size) inputs_dict = copy.deepcopy(original_inputs_dict) ids_feat_name = None if "input_ids" in inputs_dict: ids_feat_name = "input_ids" elif "decoder_input_ids" in inputs_dict: ids_feat_name = "decoder_input_ids" else: assert False, "No input ids feature found in the inputs dict" new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) new_vocab_input_ids += old_total_size inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = new_vocab_input_ids prepared_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) restored_model_outputs = model(**prepared_inputs) self.assert_outputs_same(restored_model_outputs, outputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="This test always passes on CPU.", ) def test_embeddings_out_of_bounds_raise_exception(self): if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) inputs_dict = copy.deepcopy(original_inputs_dict) if "input_ids" in inputs_dict: inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9) prepared_inputs = self._prepare_for_class(inputs_dict, model_class) with self.assertRaises(tf.errors.InvalidArgumentError): model(**prepared_inputs) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: with self.assertRaises(ValueError): model.generate(do_sample=True, max_length=5) self._check_generated_ids(model.generate(input_ids, do_sample=True)) elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]: self._check_generated_ids(model.generate(do_sample=True, max_length=5)) with self.assertRaises(ValueError): model.generate(input_ids, do_sample=False, num_return_sequences=2) self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2)) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_no_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) 
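# Generate once greedily and once with sampling, requesting scores, hidden states, attentions
# and a dict-style return, then check that the returned output classes match the architecture
# (encoder-decoder vs. decoder-only).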
output_greedy = model.generate( input_ids, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_sample = model.generate( input_ids, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput) self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput) self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2)) else: self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2)) with self.assertRaises(ValueError): model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2) self._check_generated_ids( model.generate( input_ids, do_sample=True, num_beams=2, num_return_sequences=2, ) ) self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2)) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) output_beam_search = model.generate( input_ids, num_beams=2, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_beam_sample = model.generate( input_ids, num_beams=2, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput) def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True) if not added_label_names: continue added_label = prepared_for_class[added_label_names[0]] expected_loss_size = added_label.shape.as_list()[:1] prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", 
"input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) outputs = model(model_input, **prepared_for_class) if not isinstance(outputs, ModelOutput) or not hasattr(outputs, "loss"): continue loss = outputs.loss self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @slow def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss") } if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class: del prepared_for_class["decoder_input_ids"] accuracy_classes = [ "ForPreTraining", "ForCausalLM", "ForMaskedLM", "ForQuestionAnswering", "ForMultipleChoice", "ForSequenceClassification", "ForTokenClassification", "ForNextSentencePrediction", "LMHeadModel", ] for accuracy_class in accuracy_classes: if model.__class__.__name__.endswith(accuracy_class): metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] break else: metrics = [] if hasattr(self.model_tester, "batch_size"): sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32) else: sample_weight = None outputs = model(prepared_for_class) if getattr(outputs, "loss", None) is None: continue model_weights = 
model.get_weights() model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] self.assertTrue(not isnan(val_loss1)) accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) if len(label_names) == 0: return labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) model.set_weights(model_weights) history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(not isnan(val_loss2)) accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} self.check_keras_fit_results(val_loss1, val_loss2) self.assertEqual(history1.history.keys(), history2.history.keys()) for key in history1.history.keys(): if not key.startswith("val_"): self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") if metrics: self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") def test_int_support(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: prepared_for_class = self._prepare_for_class( inputs_dict.copy(), model_class, return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) if not any( tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor) ): return prepared_for_class = { key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model = model_class(config) model(**prepared_for_class) int32_prepared_for_class = { key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model(**int32_prepared_for_class) for key, tensor in model.dummy_inputs.items(): self.assertTrue( isinstance(tensor, tf.Tensor) or tf.keras.backend.is_keras_tensor(tensor), "Dummy inputs should be tf.Tensor!", ) if tensor.dtype.is_integer: self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!") for key, tensor_spec in model.input_signature.items(): if tensor_spec.dtype.is_integer: self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!") def test_generate_with_headmasking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config) if not config.is_encoder_decoder: continue head_masking = { "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)), "decoder_head_mask": tf.zeros((config.decoder_layers, 
config.decoder_attention_heads)), "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), } signature = inspect.signature(model.call) if set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( inputs_dict["input_ids"], num_beams=1, max_length=inputs_dict["input_ids"] + 5, output_attentions=True, return_dict_in_generate=True, **{name: mask}, ) attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) _ = model(**inputs) model.save_pretrained(tmp_dir) with self.assertRaises(ValueError): new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_tf_utils") with CaptureLogger(logger) as cl: new_model = TFAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = TFAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "call")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_dataset_conversion(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False) if "labels" in tf_inputs_dict: return tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch = next(iter(tf_dataset)) if isinstance(test_batch, tf.Tensor): self.assertEqual(len(test_batch), len(input_dataset)) elif isinstance(test_batch, dict): self.assertEqual(len(test_batch), len(input_dataset.features) - 1) self.assertNotIn("extra_unwanted_column", test_batch) for tensor in test_batch.values(): self.assertTrue(isinstance(tensor, tf.Tensor)) self.assertEqual(len(tensor), len(input_dataset)) model(test_batch, 
training=False) if "labels" in inspect.signature(model_class.call).parameters.keys(): tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if "labels" not in tf_inputs_dict: return tf_inputs_dict = {key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key} tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch, test_batch_labels = next(iter(tf_dataset)) self.assertGreater(len(test_batch_labels), 0) feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch) label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels) self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1) if isinstance(test_batch, dict): self.assertNotIn("extra_unwanted_column", test_batch) if isinstance(test_batch_labels, dict): self.assertNotIn("extra_unwanted_column", test_batch_labels) model.compile(optimizer="sgd", run_eagerly=True) model.train_on_batch(test_batch, test_batch_labels) def _test_xla_generate(self, **generate_kwargs): def _generate_and_check_results(model, inputs_dict): if "input_ids" in inputs_dict: inputs = inputs_dict["input_ids"] if model.generation_config.pad_token_id is not None: if config.pad_token_id == 0: new_pad_token = model.generation_config.pad_token_id + 1 else: new_pad_token = model.generation_config.pad_token_id - 1 else: new_pad_token = None inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token) elif "input_features" in inputs_dict: inputs = inputs_dict["input_features"] else: raise ValueError("No valid generate input found in inputs_dict") generated = model.generate(inputs, **generate_kwargs).numpy() generate_xla = tf.function(model.generate, jit_compile=True) generated_xla = generate_xla(inputs, **generate_kwargs).numpy() diff = [[], []] for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()): if _generated != _generated_xla: diff[0].append(_generated) diff[1].append(_generated_xla) ratio = len(diff[0]) / len(generated) if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10): self.assertListEqual(diff[0], diff[1]) for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.eos_token_id = None config.do_sample = False for var_name in ["max_position_embeddings", "max_target_positions"]: attr = getattr(config, var_name, None) if attr is not None and attr < generate_kwargs["max_new_tokens"]: try: setattr(config, var_name, generate_kwargs["max_new_tokens"]) except NotImplementedError: pass model = model_class(config) if model.supports_xla_generation: _generate_and_check_results(model, inputs_dict) else: with self.assertRaises(ValueError): _generate_and_check_results(model, inputs_dict) def test_xla_generate_fast(self): self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3) @slow def test_xla_generate_contrastive(self): self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4) @slow def test_xla_generate_slow(self): self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128) def _generate_random_bad_tokens(self, num_bad_tokens, model): special_tokens = [] if model.config.bos_token_id is not None: 
special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): for bad_word_ids in bad_words_ids: for generated_ids_slice in generated_ids: for i in range(len(bad_word_ids), len(generated_ids_slice)): if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32) return output def random_attention_mask(shape, rng=None, name=None, dtype=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None, dtype=dtype) attn_mask = tf.concat([attn_mask[:, :-1], tf.ones_like(attn_mask[:, -1:], dtype=dtype)], axis=-1) return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None): if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
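A minimal sketch of how the helper functions defined above (ids_tensor, random_attention_mask) are typically combined to fabricate a dummy batch for a TF model test. The model class and the shape/config values below are illustrative assumptions, not taken from the test file above, and the snippet assumes it runs in the same module as those helpers.

from transformers import BertConfig, TFBertModel

batch_size, seq_length, vocab_size = 2, 8, 99
input_ids = ids_tensor((batch_size, seq_length), vocab_size)      # random token ids in [0, vocab_size)
attention_mask = random_attention_mask((batch_size, seq_length))  # guarantees the last position is attended
config = BertConfig(vocab_size=vocab_size, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = TFBertModel(config)
outputs = model(input_ids, attention_mask=attention_mask, training=False)
print(outputs.last_hidden_state.shape)  # (2, 8, 32)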
codingutf8 2019 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license restrict tensorflow to only allocate x gb of memory on the gpus virtual devices must be set before gpus have been initialized a mock response for an http head request to emulate server down download this model to make sure it s in the cache under the mock environment we get a 500 error when trying to reach the model this check we did call the fake head request this test is for deprecated behavior and can be removed in v5 tests whether the unpackinputs function behaves as expected test case 1 pass inputs as keyword arguments booleans are inherited from the config test case 2 same as above but with positional arguments test case 3 we can also pack everything in the first input test case 4 explicit boolean arguments should override the config test case 5 unexpected arguments should raise an exception test case 6 the decorator is independent from maininputname it treats the first argument of the decorated function as its main input tests whether the stable softmax is stable on cpu with and without xla same outcome regardless of the boolean mask here we can randomly mask a random numerical input outside xla the stable softmax has the same output as the original softmax we can randomly mask a random numerical input inside xla the model above is the same as the model below just a sharded version if this doesn t throw an error then the test passes the model above is the same as the model below just a sharded pytorch version the model above is the same as the model below just a sharded pytorch version this is the model we will use total size 340 000 bytes split is first two layers then last two split is first layer second layer then last 2 we use the same folder for various sizes to make sure a new save erases the old checkpoint get each shard file and its size check there is an index but no regular weight file check a file is bigger than maxsize only when it has a single weight note pickle adds some junk so the weight of the file can end up being slightly bigger than the size asked for since we count parameters check the index and the shard files found match finally check the model can be reloaded short custom tf signature function inputsignature is specific to bert using default signature default behavior overrides servingdefault providing custom signature function providing multiple custom signature function no tfmodel h5 file only a model safetensors check models are equal check we have a model safetensors file check models are equal can load from the tfformatted checkpoint check models are equal can load from the pytorchformatted checkpoint check models are equal this should not raise even if there are two types of sharded weights this should not raise even if there are two types of sharded weights this should discard the safetensors weights in favor of the h5 sharded weights this test checks that we can load safetensors from a checkpoint that only has those on the hub this test checks that we can load safetensors from a checkpoint that only has those on the hub saved in 
the pt format can load from the pytorchformatted checkpoint this test checks that we can load safetensors from a local checkpoint that only has those saved in the pt format can load from the pytorchformatted checkpoint this test checks that we ll first download h5 weights before safetensors the safetensors file on that repo is a pt safetensors and therefore cannot be loaded without pytorch this test checks that we ll first download h5 weights before safetensors the safetensors file on that repo is a pt safetensors and therefore cannot be loaded without pytorch make sure model is properly initialized check the model card was created and uploaded reset repo push to hub via savepretrained make sure model is properly initialized reset repo push to hub via savepretrained coding utf 8 2019 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license noqa f401 restrict tensorflow to only allocate x gb of memory on the gpus virtual devices must be set before gpus have been initialized a mock response for an http head request to emulate server down download this model to make sure it s in the cache under the mock environment we get a 500 error when trying to reach the model this check we did call the fake head request this test is for deprecated behavior and can be removed in v5 tests whether the unpack_inputs function behaves as expected test case 1 pass inputs as keyword arguments booleans are inherited from the config test case 2 same as above but with positional arguments test case 3 we can also pack everything in the first input test case 4 explicit boolean arguments should override the config test case 5 unexpected arguments should raise an exception test case 6 the decorator is independent from main_input_name it treats the first argument of the decorated function as its main input tests whether the stable softmax is stable on cpu with and without xla same outcome regardless of the boolean mask here we can randomly mask a random numerical input outside xla the stable softmax has the same output as the original softmax we can randomly mask a random numerical input inside xla the model above is the same as the model below just a sharded version if this doesn t throw an error then the test passes the model above is the same as the model below just a sharded pytorch version the model above is the same as the model below just a sharded pytorch version this is the model we will use total size 340 000 bytes size 80 000 size 160 000 size 80 000 size 20 000 split is first two layers then last two split is first layer second layer then last 2 we use the same folder for various sizes to make sure a new save erases the old checkpoint get each shard file and its size check there is an index but no regular weight file check a file is bigger than max_size only when it has a single weight note pickle adds some junk so the weight of the file can end up being slightly bigger than the size asked for since we count parameters check the index and the shard files found match finally check the model can be reloaded short custom tf signature function input_signature 
is specific to bert using default signature default behavior overrides serving_default providing custom signature function providing multiple custom signature function no tf_model h5 file only a model safetensors check models are equal check we have a model safetensors file check models are equal can load from the tf formatted checkpoint check models are equal can load from the pytorch formatted checkpoint check models are equal this should not raise even if there are two types of sharded weights this should not raise even if there are two types of sharded weights this should discard the safetensors weights in favor of the h5 sharded weights this test checks that we can load safetensors from a checkpoint that only has those on the hub this test checks that we can load safetensors from a checkpoint that only has those on the hub saved in the pt format can load from the pytorch formatted checkpoint this test checks that we can load safetensors from a local checkpoint that only has those saved in the pt format can load from the pytorch formatted checkpoint this test checks that we ll first download h5 weights before safetensors the safetensors file on that repo is a pt safetensors and therefore cannot be loaded without pytorch this test checks that we ll first download h5 weights before safetensors the safetensors file on that repo is a pt safetensors and therefore cannot be loaded without pytorch make sure model is properly initialized check the model card was created and uploaded reset repo push to hub via save_pretrained make sure model is properly initialized reset repo push to hub via save_pretrained
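The shard sizes quoted above ("total size 340 000 bytes", split into 80 000 / 160 000 / 80 000 / 20 000) follow directly from the four bias-free Dense layers built on a 100-dimensional input in the test code that follows; a quick back-of-the-envelope check, assuming float32 weights (4 bytes per parameter):

layer_shapes = [(100, 200), (200, 200), (200, 100), (100, 50)]  # kernel shapes of Dense(200), Dense(200), Dense(100), Dense(50)
sizes = [rows * cols * 4 for rows, cols in layer_shapes]        # bytes per kernel at 4 bytes/parameter
print(sizes)       # [80000, 160000, 80000, 20000]
print(sum(sizes))  # 340000 bytes in total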
from __future__ import annotations import inspect import json import os import random import tempfile import unittest import unittest.mock as mock from huggingface_hub import HfFolder, Repository, delete_repo, snapshot_download from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import is_tf_available, is_torch_available from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, is_staging_test, require_safetensors, require_tf, require_torch, slow, ) from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging logger = logging.get_logger(__name__) if is_tf_available(): import h5py import numpy as np import tensorflow as tf from transformers import ( BertConfig, PreTrainedModel, PushToHubCallback, RagRetriever, TFBertForMaskedLM, TFBertForSequenceClassification, TFBertModel, TFPreTrainedModel, TFRagModel, ) from transformers.modeling_tf_utils import tf_shard_checkpoint, unpack_inputs from transformers.tf_utils import stable_softmax tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: print(e) if is_torch_available(): from transformers import BertModel @require_tf class TFModelUtilsTest(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", f) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = TFBertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = TFBertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", config=config ) def test_unpack_inputs(self): class DummyModel: def __init__(self): config_kwargs = {"output_attentions": False, "output_hidden_states": False, "return_dict": False} self.config = PretrainedConfig(**config_kwargs) self.main_input_name = "input_ids" @unpack_inputs def call( self, input_ids=None, past_key_values=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return input_ids, past_key_values, output_attentions, output_hidden_states, return_dict @unpack_inputs def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None): return pixel_values, output_attentions, output_hidden_states, return_dict dummy_model = DummyModel() input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int32) 
past_key_values = tf.constant([4, 5, 6, 7], dtype=tf.int32) pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int32) output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) output = dummy_model.call(input_ids, past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) output = dummy_model.call(input_ids={"input_ids": input_ids, "past_key_values": past_key_values}) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) output = dummy_model.call( input_ids=input_ids, past_key_values=past_key_values, output_attentions=False, return_dict=True ) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertTrue(output[4]) with self.assertRaises(ValueError): output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values, foo="bar") output = dummy_model.foo(pixel_values=pixel_values) tf.debugging.assert_equal(output[0], pixel_values) self.assertFalse(output[1]) self.assertFalse(output[2]) self.assertFalse(output[3]) def test_xla_stable_softmax(self): large_penalty = -1e9 n_tokens = 10 batch_size = 8 def masked_softmax(x, boolean_mask): numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask return stable_softmax(masked_x) xla_masked_softmax = tf.function(masked_softmax, jit_compile=True) xla_stable_softmax = tf.function(stable_softmax, jit_compile=True) x = tf.random.normal((batch_size, n_tokens)) masked_tokens = random.randint(0, n_tokens) boolean_mask = tf.convert_to_tensor([[1] * (n_tokens - masked_tokens) + [0] * masked_tokens], dtype=tf.int32) numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask xla_out = xla_stable_softmax(masked_x) out = stable_softmax(masked_x) assert tf.experimental.numpy.allclose(xla_out, out) unstable_out = tf.nn.softmax(masked_x) assert tf.experimental.numpy.allclose(unstable_out, out) xla_out = xla_masked_softmax(x, boolean_mask) out = masked_softmax(x, boolean_mask) assert tf.experimental.numpy.allclose(xla_out, out) def test_checkpoint_sharding_from_hub(self): model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_sharded_checkpoint_with_prefix(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", load_weight_prefix="a/b") sharded_model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded", load_weight_prefix="a/b") for p1, p2 in zip(model.weights, sharded_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) self.assertTrue(p2.name.startswith("a/b/")) def test_sharded_checkpoint_transfer(self): TFBertForSequenceClassification.from_pretrained("ArthurZ/tiny-random-bert-sharded") @is_pt_tf_cross_test def test_checkpoint_sharding_local_from_pt(self): with 
tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-bert-sharded") model = TFBertModel.from_pretrained(tmp_dir, from_pt=True) ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) @is_pt_tf_cross_test def test_checkpoint_loading_with_prefix_from_pt(self): model = TFBertModel.from_pretrained( "hf-internal-testing/tiny-random-bert", from_pt=True, load_weight_prefix="a/b" ) ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) for p1, p2 in zip(model.weights, ref_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) @is_pt_tf_cross_test def test_checkpoint_sharding_hub_from_pt(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_shard_checkpoint(self): model = tf.keras.Sequential( [ tf.keras.layers.Dense(200, use_bias=False), tf.keras.layers.Dense(200, use_bias=False), tf.keras.layers.Dense(100, use_bias=False), tf.keras.layers.Dense(50, use_bias=False), ] ) inputs = tf.zeros((1, 100), dtype=tf.float32) model(inputs) weights = model.weights weights_dict = {w.name: w for w in weights} with self.subTest("No shard when max size is bigger than model size"): shards, index = tf_shard_checkpoint(weights) self.assertIsNone(index) self.assertDictEqual(shards, {TF2_WEIGHTS_NAME: weights}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="300kB") self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00002.h5", "dense_1/kernel:0": "tf_model-00001-of-00002.h5", "dense_2/kernel:0": "tf_model-00002-of-00002.h5", "dense_3/kernel:0": "tf_model-00002-of-00002.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"], weights_dict["dense_1/kernel:0"]] shard2 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual(shards, {"tf_model-00001-of-00002.h5": shard1, "tf_model-00002-of-00002.h5": shard2}) with self.subTest("Test sharding with weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="100kB") self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00003.h5", "dense_1/kernel:0": "tf_model-00002-of-00003.h5", "dense_2/kernel:0": "tf_model-00003-of-00003.h5", "dense_3/kernel:0": "tf_model-00003-of-00003.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"]] shard2 = [weights_dict["dense_1/kernel:0"]] shard3 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual( shards, { "tf_model-00001-of-00003.h5": shard1, "tf_model-00002-of-00003.h5": shard2, "tf_model-00003-of-00003.h5": shard3, }, ) @slow def test_special_layer_name_sharding(self): retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) model = TFRagModel.from_pretrained("facebook/rag-token-nq", retriever=retriever) with tempfile.TemporaryDirectory() as tmp_dir: for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) 
ref_model = TFRagModel.from_pretrained(tmp_dir, retriever=retriever) for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_checkpoint_sharding_local(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".h5"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME) self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 if size >= max_size_int + 50000: with h5py.File(shard_file, "r") as state_file: self.assertEqual(len(state_file), 1) with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".h5")} self.assertSetEqual(all_shards, shards_found) new_model = TFBertModel.from_pretrained(tmp_dir) model.build() new_model.build() for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @slow def test_save_pretrained_signatures(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") @tf.function( input_signature=[ [ tf.TensorSpec([None, None], tf.int32, name="input_ids"), tf.TensorSpec([None, None], tf.int32, name="token_type_ids"), tf.TensorSpec([None, None], tf.int32, name="attention_mask"), ] ] ) def serving_fn(input): return model(input) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures=None) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("serving_default" in list(model_loaded.signatures.keys())) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures={"custom_signature": serving_fn}) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature" in list(model_loaded.signatures.keys())) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, saved_model=True, signatures={"custom_signature_1": serving_fn, "custom_signature_2": serving_fn}, ) model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature_1" in list(model_loaded.signatures.keys())) self.assertTrue("custom_signature_2" in list(model_loaded.signatures.keys())) @require_safetensors def test_safetensors_save_and_load(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_tf_cross_test def test_safetensors_save_and_load_pt_to_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") pt_model = 
BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir, safe_serialization=True) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub(self): tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors-tf") for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_tf_from_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors @is_pt_tf_cross_test def test_safetensors_tf_from_torch(self): hub_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_local(self): with tempfile.TemporaryDirectory() as tmp_dir: path = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", cache_dir=tmp_dir) TFBertModel.from_pretrained(path) @require_safetensors def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_hub(self): TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded") @require_safetensors def test_safetensors_load_from_local(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-tf-only", cache_dir=tmp) tf_model = TFBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-only", cache_dir=tmp) safetensors_model = TFBertModel.from_pretrained(location) for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub_from_safetensors_pt(self): tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-h5") safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_local_from_safetensors_pt(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-h5", cache_dir=tmp) tf_model = TFBertModel.from_pretrained(location) with 
tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) safetensors_model = TFBertModel.from_pretrained(location) for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub_h5_before_safetensors(self): TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") @require_safetensors def test_safetensors_load_from_local_h5_before_safetensors(self): with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) TFBertModel.from_pretrained(location) @require_tf @is_staging_test class TFModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-tf") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-model-tf-callback") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-tf-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) model.build() logging.set_verbosity_info() logger = logging.get_logger("transformers.utils.hub") with CaptureLogger(logger) as cl: model.push_to_hub("test-model-tf", token=self._token) logging.set_verbosity_warning() self.assertIn("Uploading the following files to __DUMMY_TRANSFORMERS_USER__/test-model-tf", cl.out) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) delete_repo(token=self._token, repo_id="test-model-tf") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, token=self._token) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) @is_pt_tf_cross_test def test_push_to_hub_callback(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertForMaskedLM(config) model.compile() with tempfile.TemporaryDirectory() as tmp_dir: push_to_hub_callback = PushToHubCallback( output_dir=tmp_dir, hub_model_id="test-model-tf-callback", hub_token=self._token, ) model.fit(model.dummy_inputs, model.dummy_inputs, epochs=1, callbacks=[push_to_hub_callback]) new_model = TFBertForMaskedLM.from_pretrained(f"{USER}/test-model-tf-callback") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) tf_push_to_hub_params = dict(inspect.signature(TFPreTrainedModel.push_to_hub).parameters) tf_push_to_hub_params.pop("base_model_card_args") pt_push_to_hub_params = dict(inspect.signature(PreTrainedModel.push_to_hub).parameters) pt_push_to_hub_params.pop("deprecated_kwargs") self.assertDictEqual(tf_push_to_hub_params, pt_push_to_hub_params) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32,
num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) model.build() model.push_to_hub("valid_org/test-model-tf-org", token=self._token) new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) delete_repo(token=self._token, repo_id="valid_org/test-model-tf-org") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-tf-org") new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal)
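For reference, a minimal sketch of calling tf_shard_checkpoint directly, mirroring test_shard_checkpoint above. The toy two-layer model and the 100kB limit are illustrative choices; the fields printed (metadata/total_size and weight_map) are the same ones the test asserts on.

import tensorflow as tf
from transformers.modeling_tf_utils import tf_shard_checkpoint

toy = tf.keras.Sequential([tf.keras.layers.Dense(200, use_bias=False), tf.keras.layers.Dense(50, use_bias=False)])
toy(tf.zeros((1, 100)))  # build the weights: a 100x200 kernel (80kB) and a 200x50 kernel (40kB)
shards, index = tf_shard_checkpoint(toy.weights, max_shard_size="100kB")
if index is None:
    print("single file:", list(shards))          # everything fits into one tf_model.h5
else:
    print(index["metadata"]["total_size"])       # total bytes across all shards
    print(sorted(index["weight_map"].values()))  # which shard file each weight lands in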
codingutf8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license the default value hfinternaltesting is for running the pipeline testing against the tiny models on the hub for debugging purpose we can specify a local path which is the outputpath argument of a previous run of utilscreatedummymodels py dynamically import the transformers module to grab the attribute classes of the processor form their names run pipeline tests for a specific task args task str a task name this should be a key in the mapping pipelinetestmapping get the canonical name adding none if empty so we can generate tests run pipeline tests for a specific task with the give model class and tokenizerprocessor class names args task str a task name this should be a key in the mapping pipelinetestmapping reponame str a model repository id on the hub modelarchitecture type a subclass of pretrainedmodel or pretrainedmodel tokenizernames liststr a list of names of a subclasses of pretrainedtokenizerfast or pretrainedtokenizer processornames liststr a list of names of subclasses of baseimageprocessor or featureextractionmixin get an instance of the corresponding class xxxpipelinetests in order to use gettestpipeline and runpipelinetest run pipeline tests for a specific task with the give model class and tokenizerprocessor class name the model will be loaded from a model repository on the hub args task str a task name this should be a key in the mapping pipelinetestmapping reponame str a model repository id on the hub modelarchitecture type a subclass of pretrainedmodel or pretrainedmodel tokenizername str the name of a subclass of pretrainedtokenizerfast or pretrainedtokenizer processorname str the name of a subclass of baseimageprocessor or featureextractionmixin if the required packages like pillow or torchaudio are not installed this will fail todo maybe not upload such problematic tiny models to hub todo we should check if a model file is on the hub repo instead validate get an instance of the corresponding class xxxpipelinetests in order to use gettestpipeline and runpipelinetest the test can disable itself but it should be very marginal concerns wav2vec2forctc without tokenizer test fasttokenizer don t exist need to copy because conversation are stateful 10 examples with batch size 4 means there needs to be a unfinished batch which is important for the unbatcher need to copy because conversation object is mutated this contains the test cases to be skipped without model architecture being involved skip some tests based on the classes or their names without the instantiated objects this is to avoid calling frompretrained so reducing the runtime if we already know the tests will fail no fix is required for this case documentquestionansweringpipelinetests requires a fast tokenizer skip some more tests based on the information from the instantiated objects no fix is required for this case if pipelinetestcassename qapipelinetests and tokenizer is not none and getattrtokenizer padtoken none is none and not tokenizer class name endswithfast qapipelinetests doesn t 
work with a slow tokenizer that has no pad token return true return false def validatetestcomponentstestcase task model tokenizer processor todo move this to tiny model creation script headspecific within a model type necessary changes to the config 1 for blenderbotforcausallm if model class name blenderbotforcausallm model config encodernorepeatngramsize 0 todo change the tiny model creation script don t create models with problematic tokenizers avoid indexerror in embedding layers configwithoutvocabsize canineconfig if tokenizer is not none configvocabsize getattrmodel config vocabsize none for cliplike models if configvocabsize is none if hasattrmodel config textconfig configvocabsize getattrmodel config textconfig vocabsize none elif hasattrmodel config textencoder configvocabsize getattrmodel config textencoder vocabsize none if configvocabsize is none and model config class name not in configwithoutvocabsize raise valueerror could not determine vocabsize from model configuration while tokenizer is not none coding utf 8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license the default value hf internal testing is for running the pipeline testing against the tiny models on the hub for debugging purpose we can specify a local path which is the output_path argument of a previous run of utils create_dummy_models py dynamically import the transformers module to grab the attribute classes of the processor form their names run pipeline tests for a specific task args task str a task name this should be a key in the mapping pipeline_test_mapping get the canonical name adding none if empty so we can generate tests run pipeline tests for a specific task with the give model class and tokenizer processor class names args task str a task name this should be a key in the mapping pipeline_test_mapping repo_name str a model repository id on the hub model_architecture type a subclass of pretrainedmodel or pretrainedmodel tokenizer_names list str a list of names of a subclasses of pretrainedtokenizerfast or pretrainedtokenizer processor_names list str a list of names of subclasses of baseimageprocessor or featureextractionmixin get an instance of the corresponding class xxxpipelinetests in order to use get_test_pipeline and run_pipeline_test run pipeline tests for a specific task with the give model class and tokenizer processor class name the model will be loaded from a model repository on the hub args task str a task name this should be a key in the mapping pipeline_test_mapping repo_name str a model repository id on the hub model_architecture type a subclass of pretrainedmodel or pretrainedmodel tokenizer_name str the name of a subclass of pretrainedtokenizerfast or pretrainedtokenizer processor_name str the name of a subclass of baseimageprocessor or featureextractionmixin if the required packages like pillow or torchaudio are not installed this will fail todo maybe not upload such problematic tiny models to hub todo we should check if a model file is on the hub repo instead validate get an instance of the corresponding class 
xxxpipelinetests in order to use get_test_pipeline and run_pipeline_test the test can disable itself but it should be very marginal concerns wav2vec2forctc without tokenizer test fasttokenizer don t exist need to copy because conversation are stateful no batching for this and it s ok 10 examples with batch size 4 means there needs to be a unfinished batch which is important for the unbatcher need to copy because conversation object is mutated this contains the test cases to be skipped without model architecture being involved skip some tests based on the classes or their names without the instantiated objects this is to avoid calling from_pretrained so reducing the runtime if we already know the tests will fail no fix is required for this case documentquestionansweringpipelinetests requires a fast tokenizer noqa skip some more tests based on the information from the instantiated objects no fix is required for this case qapipelinetests doesn t work with a slow tokenizer that has no pad token todo move this to tiny model creation script head specific within a model type necessary changes to the config 1 for blenderbotforcausallm todo change the tiny model creation script don t create models with problematic tokenizers avoid indexerror in embedding layers for clip like models
import copy import json import os import random import unittest from pathlib import Path from transformers.testing_utils import ( is_pipeline_test, require_decord, require_pytesseract, require_timm, require_torch, require_torch_or_tf, require_vision, ) from transformers.utils import direct_transformers_import, logging from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests from .pipelines.test_pipelines_conversational import ConversationalPipelineTests from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests from .pipelines.test_pipelines_question_answering import QAPipelineTests from .pipelines.test_pipelines_summarization import SummarizationPipelineTests from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests from .pipelines.test_pipelines_text_to_audio import TextToAudioPipelineTests from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests from .pipelines.test_pipelines_translation import TranslationPipelineTests from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests pipeline_test_mapping = { "audio-classification": {"test": AudioClassificationPipelineTests}, "automatic-speech-recognition": {"test": AutomaticSpeechRecognitionPipelineTests}, "conversational": {"test": ConversationalPipelineTests}, "depth-estimation": {"test": DepthEstimationPipelineTests}, "document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests}, "feature-extraction": {"test": FeatureExtractionPipelineTests}, "fill-mask": {"test": FillMaskPipelineTests}, "image-classification": {"test": ImageClassificationPipelineTests}, "image-segmentation": {"test": ImageSegmentationPipelineTests}, "image-to-image": {"test": ImageToImagePipelineTests}, "image-to-text": {"test": ImageToTextPipelineTests}, "mask-generation": {"test": 
MaskGenerationPipelineTests}, "object-detection": {"test": ObjectDetectionPipelineTests}, "question-answering": {"test": QAPipelineTests}, "summarization": {"test": SummarizationPipelineTests}, "table-question-answering": {"test": TQAPipelineTests}, "text2text-generation": {"test": Text2TextGenerationPipelineTests}, "text-classification": {"test": TextClassificationPipelineTests}, "text-generation": {"test": TextGenerationPipelineTests}, "text-to-audio": {"test": TextToAudioPipelineTests}, "token-classification": {"test": TokenClassificationPipelineTests}, "translation": {"test": TranslationPipelineTests}, "video-classification": {"test": VideoClassificationPipelineTests}, "visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests}, "zero-shot": {"test": ZeroShotClassificationPipelineTests}, "zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests}, "zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests}, "zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests}, } for task, task_info in pipeline_test_mapping.items(): test = task_info["test"] task_info["mapping"] = { "pt": getattr(test, "model_mapping", None), "tf": getattr(test, "tf_model_mapping", None), } TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing") if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing": TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json") else: TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json") with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp: tiny_model_summary = json.load(fp) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers") transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) logger = logging.get_logger(__name__) class PipelineTesterMixin: model_tester = None pipeline_model_mapping = None supported_frameworks = ["pt", "tf"] def run_task_tests(self, task): if task not in self.pipeline_model_mapping: self.skipTest( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: `{task}` is not in " f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`." ) model_architectures = self.pipeline_model_mapping[task] if not isinstance(model_architectures, tuple): model_architectures = (model_architectures,) if not isinstance(model_architectures, tuple): raise ValueError(f"`model_architectures` must be a tuple. 
Got {type(model_architectures)} instead.") for model_architecture in model_architectures: model_arch_name = model_architecture.__name__ for _prefix in ["Flax", "TF"]: if model_arch_name.startswith(_prefix): model_arch_name = model_arch_name[len(_prefix) :] break tokenizer_names = [] processor_names = [] commit = None if model_arch_name in tiny_model_summary: tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"] processor_names = tiny_model_summary[model_arch_name]["processor_classes"] if "sha" in tiny_model_summary[model_arch_name]: commit = tiny_model_summary[model_arch_name]["sha"] tokenizer_names = [None] if len(tokenizer_names) == 0 else tokenizer_names processor_names = [None] if len(processor_names) == 0 else processor_names repo_name = f"tiny-random-{model_arch_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": repo_name = model_arch_name self.run_model_pipeline_tests( task, repo_name, model_architecture, tokenizer_names, processor_names, commit ) def run_model_pipeline_tests(self, task, repo_name, model_architecture, tokenizer_names, processor_names, commit): pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ for tokenizer_name in tokenizer_names: for processor_name in processor_names: if self.is_pipeline_test_to_skip( pipeline_test_class_name, model_architecture.config_class, model_architecture, tokenizer_name, processor_name, ): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) continue self.run_pipeline_test(task, repo_name, model_architecture, tokenizer_name, processor_name, commit) def run_pipeline_test(self, task, repo_name, model_architecture, tokenizer_name, processor_name, commit): repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": model_type = model_architecture.config_class.model_type repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name) tokenizer = None if tokenizer_name is not None: tokenizer_class = getattr(transformers_module, tokenizer_name) tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit) processor = None if processor_name is not None: processor_class = getattr(transformers_module, processor_name) try: processor = processor_class.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not load the " f"processor from `{repo_id}` with `{processor_name}`." ) return if tokenizer is None and processor is None: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"any tokenizer / processor from `{repo_id}`." ) return try: model = model_architecture.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"the model from `{repo_id}` with `{model_architecture}`." 
) return pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, processor): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) return validate_test_components(self, task, model, tokenizer, processor) if hasattr(model, "eval"): model = model.eval() task_test = pipeline_test_mapping[task]["test"]() pipeline, examples = task_test.get_test_pipeline(model, tokenizer, processor) if pipeline is None: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not get the " "pipeline for testing." ) return task_test.run_pipeline_test(pipeline, examples) def run_batch_test(pipeline, examples): if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None: return def data(n): for _ in range(n): yield copy.deepcopy(random.choice(examples)) out = [] if task == "conversational": for item in pipeline(data(10), batch_size=4, max_new_tokens=5): out.append(item) else: for item in pipeline(data(10), batch_size=4): out.append(item) self.assertEqual(len(out), 10) run_batch_test(pipeline, examples) @is_pipeline_test def test_pipeline_audio_classification(self): self.run_task_tests(task="audio-classification") @is_pipeline_test def test_pipeline_automatic_speech_recognition(self): self.run_task_tests(task="automatic-speech-recognition") @is_pipeline_test def test_pipeline_conversational(self): self.run_task_tests(task="conversational") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_depth_estimation(self): self.run_task_tests(task="depth-estimation") @is_pipeline_test @require_pytesseract @require_torch @require_vision def test_pipeline_document_question_answering(self): self.run_task_tests(task="document-question-answering") @is_pipeline_test def test_pipeline_feature_extraction(self): self.run_task_tests(task="feature-extraction") @is_pipeline_test def test_pipeline_fill_mask(self): self.run_task_tests(task="fill-mask") @is_pipeline_test @require_torch_or_tf @require_vision def test_pipeline_image_classification(self): self.run_task_tests(task="image-classification") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_image_segmentation(self): self.run_task_tests(task="image-segmentation") @is_pipeline_test @require_vision def test_pipeline_image_to_text(self): self.run_task_tests(task="image-to-text") @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision @require_torch def test_pipeline_mask_generation(self): self.run_task_tests(task="mask-generation") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_object_detection(self): self.run_task_tests(task="object-detection") @is_pipeline_test def test_pipeline_question_answering(self): self.run_task_tests(task="question-answering") @is_pipeline_test def test_pipeline_summarization(self): self.run_task_tests(task="summarization") @is_pipeline_test def test_pipeline_table_question_answering(self): self.run_task_tests(task="table-question-answering") @is_pipeline_test def test_pipeline_text2text_generation(self): self.run_task_tests(task="text2text-generation") @is_pipeline_test def test_pipeline_text_classification(self): 
self.run_task_tests(task="text-classification") @is_pipeline_test @require_torch_or_tf def test_pipeline_text_generation(self): self.run_task_tests(task="text-generation") @is_pipeline_test @require_torch def test_pipeline_text_to_audio(self): self.run_task_tests(task="text-to-audio") @is_pipeline_test def test_pipeline_token_classification(self): self.run_task_tests(task="token-classification") @is_pipeline_test def test_pipeline_translation(self): self.run_task_tests(task="translation") @is_pipeline_test @require_torch_or_tf @require_vision @require_decord def test_pipeline_video_classification(self): self.run_task_tests(task="video-classification") @is_pipeline_test @require_torch @require_vision def test_pipeline_visual_question_answering(self): self.run_task_tests(task="visual-question-answering") @is_pipeline_test def test_pipeline_zero_shot(self): self.run_task_tests(task="zero-shot") @is_pipeline_test @require_torch def test_pipeline_zero_shot_audio_classification(self): self.run_task_tests(task="zero-shot-audio-classification") @is_pipeline_test @require_vision def test_pipeline_zero_shot_image_classification(self): self.run_task_tests(task="zero-shot-image-classification") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_object_detection(self): self.run_task_tests(task="zero-shot-object-detection") def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "DocumentQuestionAnsweringPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def is_pipeline_test_to_skip_more(self, pipeline_test_casse_name, config, model, tokenizer, processor): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer is not None and getattr(tokenizer, "pad_token", None) is None and not tokenizer.__class__.__name__.endswith("Fast") ): return True return False def validate_test_components(test_case, task, model, tokenizer, processor): if model.__class__.__name__ == "BlenderbotForCausalLM": model.config.encoder_no_repeat_ngram_size = 0 CONFIG_WITHOUT_VOCAB_SIZE = ["CanineConfig"] if tokenizer is not None: config_vocab_size = getattr(model.config, "vocab_size", None) if config_vocab_size is None: if hasattr(model.config, "text_config"): config_vocab_size = getattr(model.config.text_config, "vocab_size", None) elif hasattr(model.config, "text_encoder"): config_vocab_size = getattr(model.config.text_encoder, "vocab_size", None) if config_vocab_size is None and model.config.__class__.__name__ not in CONFIG_WITHOUT_VOCAB_SIZE: raise ValueError( "Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`." )
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Mixin to be overridden by feature-extractor-specific tests. Covers padding and
# truncation for list / numpy inputs: padding="max_length" requires max_length,
# pad_to_multiple_of rounds lengths up to the next multiple, padding values are
# checked explicitly, truncation that yields ragged shapes forces list outputs
# instead of np.ndarray, and truncation=True with padding="max_length" also
# requires max_length.
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin): feat_extract_tester = None feature_extraction_class = None @property def feat_extract_dict(self): return self.feat_extract_tester.prepare_feat_extract_dict() def test_feat_extract_common_properties(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feat_extract, "feature_size")) self.assertTrue(hasattr(feat_extract, "sampling_rate")) self.assertTrue(hasattr(feat_extract, "padding_value")) def test_batch_feature(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_torch def test_batch_feature_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_tf def test_batch_feature_tf(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) def _check_padding(self, numpify=False): def _inputs_have_equal_length(input): length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) pad_diff = 
self.feat_extract_tester.seq_length_diff pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff pad_min_length = self.feat_extract_tester.min_seq_length batch_size = self.feat_extract_tester.batch_size feature_size = self.feat_extract_tester.feature_size input_1 = feat_extract.pad(processed_features, padding=False) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="longest") input_2 = input_2[input_name] input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1])) input_3 = input_3[input_name] input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np") input_4 = input_4[input_name] with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length")[input_name] input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np" ) input_5 = input_5[input_name] self.assertFalse(_inputs_have_equal_length(input_1)) self.assertTrue(_inputs_have_equal_length(input_2)) self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(_inputs_are_equal(input_2, input_3)) self.assertTrue(len(input_1[0]) == pad_min_length) self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff) self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0]))) self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size) input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10) input_6 = input_6[input_name] input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10) input_7 = input_7[input_name] input_8 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np", ) input_9 = input_9[input_name] self.assertTrue(all(len(x) % 10 == 0 for x in input_6)) self.assertTrue(_inputs_are_equal(input_6, input_7)) expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8)) self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_9.shape[2] == feature_size) padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3 ) self.assertTrue( abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1e-3 ) def _check_truncation(self, numpify=False): def _inputs_have_equal_length(input): length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if 
len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_1 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True ) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0])) input_2 = input_2[input_name] self.assertTrue(_inputs_have_equal_length(input_1)) self.assertFalse(_inputs_have_equal_length(input_2)) input_3 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True, ) input_3 = input_3[input_name] input_4 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np" ) input_4 = input_4[input_name] self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(input_3.shape[1] == len(speech_inputs[0])) self.assertFalse(_inputs_have_equal_length(input_4)) input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np", ) input_5 = input_5[input_name] input_6 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True ) input_6 = input_6[input_name] input_7 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np" ) input_7 = input_7[input_name] self.assertTrue(input_5.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(input_5)) self.assertTrue(_inputs_have_equal_length(input_6)) self.assertTrue(_inputs_are_equal(input_5, input_6)) self.assertFalse(_inputs_have_equal_length(input_7)) self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1])) with self.assertRaises(ValueError): feat_extract.pad(processed_features, truncation=True)[input_name] with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name] pad_to_multiple_of = 12 input_8 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, ) input_9 = input_9[input_name] expected_length = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_8[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(input_8)) self.assertFalse(_inputs_have_equal_length(input_9)) def test_padding_from_list(self): self._check_padding(numpify=False) def test_padding_from_array(self): self._check_padding(numpify=True) def test_truncation_from_list(self): 
self._check_truncation(numpify=False) def test_truncation_from_array(self): self._check_truncation(numpify=True) @require_torch def test_padding_accepts_tensors_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) @require_tf def test_padding_accepts_tensors_tf(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths) def test_attention_mask_with_truncation(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lengths) processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] )
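As a concrete illustration of the padding semantics checked by _check_padding, here is a small sketch using Wav2Vec2FeatureExtractor, chosen only because it is a readily available SequenceFeatureExtractor; the input lengths and the expected shapes in the comments are illustrative assumptions derived from the padding rules above.

import numpy as np
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
speech_inputs = [np.random.rand(length).astype(np.float32) for length in (800, 1000, 1200)]
processed = BatchFeature({"input_values": speech_inputs})

# padding="longest" pads every example to the longest one in the batch.
longest = feat_extract.pad(processed, padding="longest", return_tensors="np")
print(longest["input_values"].shape)  # expected (3, 1200)

# pad_to_multiple_of rounds the target length up to the next multiple (here 1280).
multiple = feat_extract.pad(
    processed, padding="max_length", max_length=1200, pad_to_multiple_of=128, return_tensors="np"
)
print(multiple["input_values"].shape)  # expected (3, 1280)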
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Covers: cached files being reused when the Hub is unreachable (a mocked HEAD
# request returning a 500 error, with a check that the fake HEAD was called),
# deprecated legacy loading from a single file or URL (removable in v5, and
# guarded against a bug where a local tokenizer.json in the current folder would
# shadow the remote checkpoint; tiny-random-bert has a vocab size of 1024,
# tiny GPT-2 of 1000), push_to_hub and save_pretrained round-trips, dynamic
# custom tokenizers (isinstance checks are impossible because the classes come
# from a dynamic module), and Trie splitting, which must return correct string
# parts even when the offsets are wrong.
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPT2TokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class TokenizerUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") mock_head.assert_called() @require_tokenizers def test_cached_files_are_used_when_internet_is_down_missing_files(self): response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} _ = GPT2TokenizerFast.from_pretrained("gpt2") with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = GPT2TokenizerFast.from_pretrained("gpt2") mock_head.assert_called() def test_legacy_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f) _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) if os.path.isfile("tokenizer.json"): return try: with open("tokenizer.json", "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") self.assertEqual(tokenizer.vocab_size, 1000) finally: os.remove("tokenizer.json") def test_legacy_load_from_url(self): _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model") @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-tokenizer") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("test-tokenizer", token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) 
delete_repo(token=self._token, repo_id="test-tokenizer") with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("valid_org/test-tokenizer-org", token=self._token) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org") with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, token=self._token ) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @require_tokenizers def test_push_to_hub_dynamic_tokenizer(self): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast") tokenizer = AutoTokenizer.from_pretrained( f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True ) self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") class TrieTest(unittest.TestCase): def test_trie(self): trie = Trie() trie.add("Hello 友達") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}) trie.add("Hello") trie.data self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}) def test_trie_split(self): trie = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"]) trie.add("[CLS]") trie.add("extra_id_1") trie.add("extra_id_100") self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"]) def test_trie_single(self): trie = Trie() trie.add("A") self.assertEqual(trie.split("ABC"), ["A", "BC"]) self.assertEqual(trie.split("BCA"), ["BC", "A"]) def test_trie_final(self): trie = Trie() trie.add("TOKEN]") trie.add("[SPECIAL_TOKEN]") 
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_subtokens(self): trie = Trie() trie.add("A") trie.add("P") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_suffix_tokens(self): trie = Trie() trie.add("AB") trie.add("B") trie.add("C") self.assertEqual(trie.split("ABC"), ["AB", "C"]) def test_trie_skip(self): trie = Trie() trie.add("ABC") trie.add("B") trie.add("CD") self.assertEqual(trie.split("ABCD"), ["ABC", "D"]) def test_cut_text_hardening(self): trie = Trie() parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3]) self.assertEqual(parts, ["AB", "C"])
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests PreTrainedTokenizerFast with two tokenizers covering different model
# types (Unigram and WordLevel). Several common tests are disabled because this
# is the only tokenizer class not linked to any model, and it does not take
# tokenizer_file in its signature. Training a new tokenizer swaps temporary
# directories and always restores the default tokenizer, even if the test fails.
# Padding/truncation configured on the backend is saved into tokenizer_config.json
# (note that a model's default max_length is not used, so truncation=True alone
# does nothing and does not warn). Versioning tests check that a repo with two
# tokenizer files picks tokenizer.4.0.0.json on transformers >= 4.0.0 (to be
# adjusted if v42 is ever reached) and that an older, monkey-patched version
# falls back to the old file; see huggingface/transformers#12550 and
# huggingface/tokenizers#537.
import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) def test_tokenizer_mismatch_warning(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass def test_pretrained_model_lists(self): pass def test_prepare_for_model(self): pass def test_rust_tokenizer_signature(self): pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("t5-base") tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): repo = "hf-internal-testing/test-two-tokenizers" tokenizer = 
AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
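The wrapping pattern exercised by test_init_from_tokenizers_model, shown in isolation: padding configured on the `tokenizers` backend survives the round-trip into PreTrainedTokenizerFast. The "t5-base" checkpoint is simply the one the test happens to use.

from tokenizers import Tokenizer
from transformers import PreTrainedTokenizerFast

backend = Tokenizer.from_pretrained("t5-base")
backend.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8)

fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=backend)
print(fast_tokenizer.pad_token, fast_tokenizer.padding_side)  # <pad> right
print(fast_tokenizer.init_kwargs["max_length"])               # 512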
# coding=utf-8
# Copyright 2018 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# isort: skip_file
#
# Checks that pickling a BatchEncoding correctly restores is_fast, the underlying
# encodings, and the same keys; covers plain Python (no tensor) outputs and
# re-converting already converted tensors for each framework.
import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow if is_tokenizers_available(): from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) self.assertEqual(be_restored.is_fast, be_original.is_fast) if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", 
return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) 
self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Checks that each agent type's tensor matches the original, that the serialized
# path remains valid even after the object is deleted, and that the written file
# contains the same values as the original tensor.
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def get_new_path(suffix="") -> str: directory = tempfile.mkdtemp() return os.path.join(directory, str(uuid.uuid4()) + suffix) @require_soundfile @require_torch class AgentAudioTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 agent_type = AgentAudio(tensor) path = str(agent_type.to_string()) self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) del agent_type self.assertTrue(os.path.exists(path)) new_tensor, _ = sf.read(path) self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4)) def test_from_string(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 path = get_new_path(suffix=".wav") sf.write(path, tensor, 16000) agent_type = AgentAudio(path) self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) self.assertEqual(agent_type.to_string(), path) @require_vision @require_torch class AgentImageTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.randint(0, 256, (64, 64, 3)) agent_type = AgentImage(tensor) path = str(agent_type.to_string()) self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4)) self.assertIsInstance(agent_type.to_raw(), Image.Image) del agent_type self.assertTrue(os.path.exists(path)) def test_from_string(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(path) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) del agent_type self.assertTrue(os.path.exists(path)) def test_from_image(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(image) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) del agent_type self.assertTrue(os.path.exists(path)) class AgentTextTests(unittest.TestCase): def test_from_string(self): string = "Hey!" agent_type = AgentText(string) self.assertEqual(string, agent_type.to_string()) self.assertEqual(string, agent_type.to_raw()) self.assertEqual(string, agent_type)
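A condensed usage sketch of the agent types tested above (requires torch and soundfile, mirroring the test requirements; the calls shown are exactly the ones the tests exercise):

import torch
from transformers.tools.agent_types import AgentAudio, AgentText

# AgentAudio wraps a waveform tensor; to_string() writes it to a temporary .wav file whose path
# stays valid even after the object is deleted, and to_raw() returns the underlying tensor.
waveform = torch.rand(12, dtype=torch.float64) - 0.5
audio = AgentAudio(waveform)
wav_path = audio.to_string()
print(wav_path, torch.allclose(waveform, audio.to_raw(), atol=1e-4))

# AgentText behaves like the underlying string for both to_string() and to_raw().
text = AgentText("Hey!")
assert text.to_string() == text.to_raw() == "Hey!"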
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from datasets import load_dataset from transformers import load_tool from .test_tools_common import ToolTesterMixin class DocumentQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("document-question-answering") self.tool.setup() self.remote_tool = load_tool("document-question-answering", remote=True) def test_exact_match_arg(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.tool(document, "When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.") def test_exact_match_arg_remote(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.remote_tool(document, "When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.") def test_exact_match_kwarg(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] self.tool(document=document, question="When is the coffee break?") def test_exact_match_kwarg_remote(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.remote_tool(document=document, question="When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.")
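The remaining tool tests follow the same call pattern; a sketch using the document-question-answering tool from the file above (assumes the hf-internal-testing dataset and the tool's checkpoint can be downloaded):

from datasets import load_dataset
from transformers import load_tool

# Load the local implementation of the tool and instantiate its underlying model.
tool = load_tool("document-question-answering")
tool.setup()

# Tools accept their inputs either positionally or as keyword arguments.
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
print(tool(document, "When is the coffee break?"))
print(tool(document=document, question="When is the coffee break?"))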
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageCaptioningToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-captioning") self.tool.setup() self.remote_tool = load_tool("image-captioning", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image) self.assertEqual(result, "two cats sleeping on a couch")
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-question-answering") self.tool.setup() self.remote_tool = load_tool("image-question-answering", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image, "How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image, "How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image, question="How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image, question="How many cats are sleeping on the couch?") self.assertEqual(result, "2")
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageSegmentationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-segmentation") self.tool.setup() self.remote_tool = load_tool("image-segmentation", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image, "cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image, "cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image, label="cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image, label="cat") self.assertTrue(isinstance(result, Image.Image))
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license fake function we will use as tool evaluate returns the value of the last assignment won't work without the tool evaluate returns the value of the last assignment evaluate returns the value of the last assignment evaluate returns the value of the last assignment evaluate returns the value of the last assignment
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def add_two(x): return x + 2 class PythonInterpreterTester(unittest.TestCase): def test_evaluate_assign(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) code = "x = y" state = {"y": 5} result = evaluate(code, {}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 5, "y": 5}) def test_evaluate_call(self): code = "y = add_two(x)" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) with CaptureStdout() as out: result = evaluate(code, {}, state=state) assert result is None assert "tried to execute add_two" in out.out def test_evaluate_constant(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) def test_evaluate_dict(self): code = "test_dict = {'x': x, 'y': add_two(x)}" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertDictEqual(result, {"x": 3, "y": 5}) self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_expression(self): code = "x = 3\ny = 5" state = {} result = evaluate(code, {}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) def test_evaluate_f_string(self): code = "text = f'This is x: {x}.'" state = {"x": 3} result = evaluate(code, {}, state=state) assert result == "This is x: 3." self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."}) def test_evaluate_if(self): code = "if x <= 3:\n y = 2\nelse:\n y = 5" state = {"x": 3} result = evaluate(code, {}, state=state) assert result == 2 self.assertDictEqual(state, {"x": 3, "y": 2}) state = {"x": 8} result = evaluate(code, {}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 8, "y": 5}) def test_evaluate_list(self): code = "test_list = [x, add_two(x)]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertListEqual(result, [3, 5]) self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) def test_evaluate_name(self): code = "y = x" state = {"x": 3} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "y": 3}) def test_evaluate_subscript(self): code = "test_list = [x, add_two(x)]\ntest_list[1]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_for(self): code = "x = 0\nfor i in range(3):\n x = i" state = {} result = evaluate(code, {"range": range}, state=state) assert result == 2 self.assertDictEqual(state, {"x": 2, "i": 2})
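A small sketch of the evaluate API covered above: the tools dict is the only callable namespace, state is updated in place, and the value of the last statement is returned (only behaviours shown in the tests are assumed):

from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

# state is mutated in place; the result is the value produced by the last statement.
state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 5
print(state)   # {'x': 3, 'y': 5}

# Calling a function that was not passed in the tools dict does not execute it;
# a warning is printed to stdout and the result is None.
result = evaluate("y = add_two(x)", {}, state={"x": 3})
print(result)  # None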
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from transformers import is_torch_available, load_tool from .test_tools_common import ToolTesterMixin if is_torch_available(): import torch class SpeechToTextToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("speech-to-text") self.tool.setup() def test_exact_match_arg(self): result = self.tool(torch.ones(3000)) self.assertEqual(result, " you") def test_exact_match_kwarg(self): result = self.tool(audio=torch.ones(3000)) self.assertEqual(result, " you")
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("text-classification") self.tool.setup() self.remote_tool = load_tool("text-classification", remote=True) def test_exact_match_arg(self): result = self.tool("That's quite cool", ["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_arg_remote(self): result = self.remote_tool("That's quite cool", ["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_kwarg(self): result = self.tool(text="That's quite cool", labels=["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"]) self.assertEqual(result, "positive")
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license speecht5 isn't deterministic speecht5 isn't deterministic
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("text-to-speech") self.tool.setup() def test_exact_match_arg(self): torch.manual_seed(0) result = self.tool("hey") resulting_tensor = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) ) def test_exact_match_kwarg(self): torch.manual_seed(0) result = self.tool("hey") resulting_tensor = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
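Because SpeechT5 generation is not deterministic, the tests above seed the RNG before each call; the same pattern applies outside the tests (a sketch, assuming the text-to-speech tool's checkpoint is available):

import torch
from transformers import load_tool

tool = load_tool("text-to-speech")
tool.setup()

# Seed before generation so repeated calls produce comparable waveforms.
torch.manual_seed(0)
speech = tool("hey")
waveform = speech.to_raw()
print(waveform.shape)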
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license there is a single output should not raise an error
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image authorized_types = ["text", "image", "audio"] def create_inputs(input_types: List[str]): inputs = [] for input_type in input_types: if input_type == "text": inputs.append("Text input") elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)) ) elif input_type == "audio": inputs.append(torch.ones(3000)) elif isinstance(input_type, list): inputs.append(create_inputs(input_type)) else: raise ValueError(f"Invalid type requested: {input_type}") return inputs def output_types(outputs: List): output_types = [] for output in outputs: if isinstance(output, (str, AgentText)): output_types.append("text") elif isinstance(output, (Image.Image, AgentImage)): output_types.append("image") elif isinstance(output, (torch.Tensor, AgentAudio)): output_types.append("audio") else: raise ValueError(f"Invalid output: {output}") return output_types @is_tool_test class ToolTesterMixin: def test_inputs_outputs(self): self.assertTrue(hasattr(self.tool, "inputs")) self.assertTrue(hasattr(self.tool, "outputs")) inputs = self.tool.inputs for _input in inputs: if isinstance(_input, list): for __input in _input: self.assertTrue(__input in authorized_types) else: self.assertTrue(_input in authorized_types) outputs = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types) def test_call(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_common_attributes(self): self.assertTrue(hasattr(self.tool, "description")) self.assertTrue(hasattr(self.tool, "default_checkpoint")) self.assertTrue(self.tool.description.startswith("This is a tool that")) def test_agent_types_outputs(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = create_inputs(self.tool.inputs) _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
coding utf 8 2023 huggingface inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license there is a single output should not raise an error
import unittest from transformers import load_tool from transformers.tools.agent_types import AGENT_TYPE_MAPPING from .test_tools_common import ToolTesterMixin, output_types class TranslationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("translation") self.tool.setup() self.remote_tool = load_tool("translation", remote=True) def test_exact_match_arg(self): result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_arg_remote(self): result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg(self): result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_call(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_agent_types_outputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license with label_ids features can already be tensors labels can already be tensors with label_ids expect error due to padding token missing for reproducibility features can already be tensors expect error due to odd sequence length with label_ids features can already be tensors labels can already be tensors confirms that numpy inputs are handled correctly even when scalars with label_ids expect error due to padding token missing for reproducibility self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) features can already be tensors expect error due to odd sequence length with label_ids features can already be tensors labels can already be tensors with label_ids expect error due to padding token missing for reproducibility self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) features can already be tensors expect error due to odd sequence length
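A short sketch of the data collator calls exercised by the tests that follow; it mirrors the test fixtures, assumes the bert-base-cased checkpoint can be downloaded, and uses the PyTorch default return type:

from transformers import (
    BertTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorWithPadding,
    default_data_collator,
)

# default_data_collator stacks equal-length features and maps "label"/"label_ids" to "labels".
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
print(batch["labels"].shape, batch["inputs"].shape)  # torch.Size([8]) torch.Size([8, 6])

# DataCollatorWithPadding pads ragged input_ids up to the longest sequence in the batch.
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
pad_collator = DataCollatorWithPadding(tokenizer)
batch = pad_collator([{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}])
print(batch["input_ids"].shape)  # torch.Size([2, 6])

# DataCollatorForLanguageModeling masks tokens and sets labels to -100 on unmasked positions.
mlm_collator = DataCollatorForLanguageModeling(tokenizer)
batch = mlm_collator([{"input_ids": list(range(10))}, {"input_ids": list(range(10))}])
print(batch["input_ids"].shape, batch["labels"].shape)  # torch.Size([2, 10]) torch.Size([2, 10])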
import os import shutil import tempfile import unittest import numpy as np from transformers import ( BertTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, default_data_collator, is_tf_available, is_torch_available, set_seed, ) from transformers.testing_utils import require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_torch class DataCollatorIntegrationTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 10])) features = [{"label": torch.tensor(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features) self.assertEqual(batch["labels"].dtype, torch.long) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 10])) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features) self.assertEqual(batch["labels"].dtype, torch.long) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features) self.assertEqual(batch["labels"].dtype, torch.float) def test_default_with_no_labels(self): features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = 
DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) self.assertEqual(batch["labels"].shape, torch.Size([2, 10])) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) self.assertEqual(batch["labels"].shape, torch.Size([2, 8])) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) for feature in features: feature.pop("labels") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) def test_data_collator_for_token_classification_works_with_pt_tensors(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": torch.tensor([0, 1, 2]), "labels": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3, 4, 5]), "labels": torch.tensor([0, 1, 2, 3, 4, 5])}, ] data_collator = DataCollatorForTokenClassification(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) self.assertEqual(batch["labels"].shape, torch.Size([2, 10])) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) self.assertEqual(batch["labels"].shape, torch.Size([2, 8])) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, 
torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) for feature in features: feature.pop("labels") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) with self.assertRaises(ValueError): data_collator(pad_features) set_seed(42) tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) def test_data_collator_for_language_modeling(self): no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = 
DataCollatorForWholeWordMask(tokenizer, return_tensors="pt") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer) batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["labels"].shape, torch.Size((2, 5))) self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,))) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["labels"].shape, torch.Size((2, 8))) self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,))) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": torch.tensor([0, 1, 2, 3, 4]), "token_type_ids": torch.tensor([0, 1, 2, 3, 4]), "sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["labels"].shape, torch.Size((2, 5))) self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,))) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["labels"].shape, torch.Size((2, 8))) self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,))) @require_tf class TFDataCollatorIntegrationTest(unittest.TestCase): def setUp(self): 
super().setUp() self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), ([[0, 1, 2]] * 8)) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), (list(range(8)))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 10]) features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["labels"].numpy().tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 10]) def test_numpy_dtype_preservation(self): data_collator = default_data_collator features = [{"input_ids": np.array([0, 1, 2, 3, 4]), "label": np.int64(i)} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.float32) def test_default_with_no_labels(self): features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="tf") batch = data_collator(features) 
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, [2, 8]) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape.as_list(), [2, 6]) self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification( tokenizer, padding="max_length", max_length=10, return_tensors="tf" ) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape.as_list(), [2, 6]) self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-1] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) data_collator = DataCollatorForLanguageModeling( tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="tf" ) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf") with self.assertRaises(ValueError): data_collator(pad_features) set_seed(42) tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) batch = data_collator(pad_features, return_tensors="tf") self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) 
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) batch = data_collator(pad_features, return_tensors="tf") self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) def test_data_collator_for_language_modeling(self): no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="tf") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["labels"].shape.as_list(), [2, 5]) self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2]) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") 
batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2]) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]), "token_type_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]), "sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["labels"].shape.as_list(), [2, 5]) self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2]) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2]) class NumpyDataCollatorIntegrationTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 6)) features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), [[0, 1, 2]] * 8) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 6)) features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 10)) features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["labels"].tolist(), (list(range(8)))) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 10)) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.int64) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.float32) def test_default_with_no_labels(self): features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in 
range(8)] batch = default_data_collator(features, return_tensors="np") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, (8, 6)) features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, (8, 6)) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, (2, 6)) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification( tokenizer, padding="max_length", max_length=10, return_tensors="np" ) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, (2, 6)) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) batch = data_collator(pad_features, return_tensors="np") self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) data_collator = DataCollatorForLanguageModeling( tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="np" ) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) batch = data_collator(pad_features, return_tensors="np") self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 
16)) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np") with self.assertRaises(ValueError): data_collator(pad_features) set_seed(42) tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) def test_data_collator_for_language_modeling(self): no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="np") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["perm_mask"].shape, (2, 10, 10)) self.assertEqual(batch["target_mapping"].shape, (2, 10, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["perm_mask"].shape, (2, 10, 10)) self.assertEqual(batch["target_mapping"].shape, (2, 10, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], 
"next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 5)) self.assertEqual(batch["token_type_ids"].shape, (2, 5)) self.assertEqual(batch["labels"].shape, (2, 5)) self.assertEqual(batch["next_sentence_label"].shape, (2,)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["token_type_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) self.assertEqual(batch["next_sentence_label"].shape, (2,)) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": np.array([0, 1, 2, 3, 4]), "token_type_ids": np.array([0, 1, 2, 3, 4]), "sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 5)) self.assertEqual(batch["token_type_ids"].shape, (2, 5)) self.assertEqual(batch["labels"].shape, (2, 5)) self.assertEqual(batch["sentence_order_label"].shape, (2,)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["token_type_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) self.assertEqual(batch["sentence_order_label"].shape, (2,))
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
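# The fixtures below define tiny regression datasets and models plus a get_regression_trainer() helper,
# so that Trainer behaviour (training, evaluation, checkpointing and resuming) can be asserted cheaply.
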
import dataclasses import gc import json import math import os import random import re import subprocess import sys import tempfile import unittest from itertools import product from pathlib import Path from typing import Dict, List from unittest.mock import Mock, patch import numpy as np from huggingface_hub import HfFolder, delete_repo, list_repo_commits, list_repo_files from parameterized import parameterized from requests.exceptions import HTTPError from transformers import ( AutoTokenizer, IntervalStrategy, PretrainedConfig, TrainingArguments, get_polynomial_decay_schedule_with_warmup, is_torch_available, logging, ) from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS from transformers.testing_utils import ( ENDPOINT_STAGING, TOKEN, USER, CaptureLogger, TestCasePlus, backend_device_count, execute_subprocess_async, get_gpu_count, get_tests_dir, is_staging_test, require_accelerate, require_intel_extension_for_pytorch, require_optuna, require_ray, require_safetensors, require_sentencepiece, require_sigopt, require_tensorboard, require_tokenizers, require_torch, require_torch_accelerator, require_torch_bf16, require_torch_gpu, require_torch_multi_accelerator, require_torch_non_multi_accelerator, require_torch_non_multi_gpu, require_torch_tensorrt_fx, require_torch_tf32, require_torch_up_to_2_accelerators, require_torchdynamo, require_wandb, slow, torch_device, ) from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend from transformers.training_args import OptimizerNames from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, is_apex_available, is_bitsandbytes_available, is_safetensors_available, is_torchdistx_available, ) from transformers.utils.hp_naming import TrialShortNamer if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset import transformers.optimization from transformers import ( AutoModelForSequenceClassification, EarlyStoppingCallback, GlueDataset, GlueDataTrainingArguments, GPT2Config, GPT2LMHeadModel, LineByLineTextDataset, PreTrainedModel, Trainer, TrainerState, ) from transformers.modeling_utils import unwrap_model if is_safetensors_available(): import safetensors.torch PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt" class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result @dataclasses.dataclass class RegressionTrainingArguments(TrainingArguments): a: float = 0.0 b: float = 0.0 keep_report_to: bool = False def __post_init__(self): super().__post_init__() if not self.keep_report_to: self.report_to = [] class RepeatDataset: def __init__(self, x, length=64): self.x = x self.length = length def __len__(self): return self.length def __getitem__(self, i): return {"input_ids": self.x, "labels": self.x} class DynamicShapesDataset: def __init__(self, length=64, seed=42, batch_size=8): self.length = length np.random.seed(seed) sizes = np.random.randint(1, 20, (length // batch_size,)) 
self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] def __len__(self): return self.length def __getitem__(self, i): return {"input_x": self.xs[i], "labels": self.ys[i]} class AlmostAccuracy: def __init__(self, thresh=0.25): self.thresh = thresh def __call__(self, eval_pred): predictions, labels = eval_pred true = np.abs(predictions - labels) <= self.thresh return {"accuracy": true.astype(np.float32).mean().item()} class RegressionModelConfig(PretrainedConfig): def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs): super().__init__(**kwargs) self.a = a self.b = b self.double_output = double_output self.random_torch = random_torch self.hidden_size = 1 if is_torch_available(): class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class MultiLoader: def __init__(self, loaders): self.loaders = loaders def __len__(self): return sum(len(loader) for loader in self.loaders) def __iter__(self): for loader in self.loaders: yield from loader class CustomDataloaderTrainer(Trainer): def get_train_dataloader(self): dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()] return MultiLoader(dataloaders) def get_eval_dataloader(self, eval_dataset): dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)] return MultiLoader(dataloaders) class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionDictModel(nn.Module): def __init__(self, a=0, b=0): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b result = {"output": y} if labels is not None: result["loss"] = nn.functional.mse_loss(y, labels) return result class RegressionPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel): config_class = RegressionModelConfig 
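# Aside (hedged illustration only): the gradient-checkpointing model defined below trades memory for
# compute by re-running each layer's forward pass during backward instead of storing activations.
# In plain PyTorch that is torch.utils.checkpoint.checkpoint(layer, x), which is essentially what the
# model's _gradient_checkpointing_func wraps once gradient checkpointing is enabled.
import torch
from torch.utils.checkpoint import checkpoint

demo_layer = torch.nn.Linear(4, 4)
demo_x = torch.randn(2, 4, requires_grad=True)
demo_y = checkpoint(demo_layer, demo_x, use_reentrant=False)
demo_y.sum().backward()
print(demo_x.grad.shape)  # gradients flow as usual; activations were recomputed during backward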
base_model_prefix = "regression" supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)]) self.head = nn.Linear(config.hidden_size, 1) self.gradient_checkpointing = False self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x.unsqueeze(0) for layer in self.layers: if self.training and self.gradient_checkpointing: outputs = self._gradient_checkpointing_func(layer.__call__, y) else: outputs = layer(y) y = outputs * 3 logits = self.head(y) if labels is None: return (logits, logits) if self.double_output else (logits,) loss = nn.functional.mse_loss(logits, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionRandomPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.random_torch = config.random_torch def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if self.random_torch: torch_rand = torch.randn(1).squeeze() np_rand = np.random.rand() rand_rand = random.random() if self.random_torch: y += 0.05 * torch_rand y += 0.05 * torch.tensor(np_rand + rand_rand) if labels is None: return (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) def get_regression_trainer( a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs ): label_names = kwargs.get("label_names", None) gradient_checkpointing = kwargs.get("gradient_checkpointing", False) train_dataset = RegressionDataset(length=train_len, label_names=label_names) eval_dataset = RegressionDataset(length=eval_len, label_names=label_names) model_init = kwargs.pop("model_init", None) if model_init is not None: model = None else: if pretrained: config = RegressionModelConfig(a=a, b=b, double_output=double_output) target_cls = ( RegressionPreTrainedModel if not gradient_checkpointing else RegressionPreTrainedModelWithGradientCheckpointing ) model = target_cls(config) else: model = RegressionModel(a=a, b=b, double_output=double_output) compute_metrics = kwargs.pop("compute_metrics", None) data_collator = kwargs.pop("data_collator", None) optimizers = kwargs.pop("optimizers", (None, None)) output_dir = kwargs.pop("output_dir", "./regression") preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None) args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs) return Trainer( model, args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, optimizers=optimizers, model_init=model_init, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) class TrainerIntegrationCommon: def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=True): 
weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: file_list.append("config.json") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint)) for filename in file_list: self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename))) def check_best_model_has_been_loaded( self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True ): checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history values = [d[metric] for d in log_history] best_value = max(values) if greater_is_better else min(values) best_checkpoint = (values.index(best_value) + 1) * freq checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}") if is_pretrained: best_model = RegressionPreTrainedModel.from_pretrained(checkpoint) best_model.to(trainer.args.device) else: best_model = RegressionModel() if not safe_weights: state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME)) else: state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME)) best_model.load_state_dict(state_dict) best_model.to(trainer.args.device) self.assertTrue(torch.allclose(best_model.a, trainer.model.a)) self.assertTrue(torch.allclose(best_model.b, trainer.model.b)) metrics = trainer.evaluate() self.assertEqual(metrics[metric], best_value) def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): state = trainer_state.copy() state1 = trainer_state1.copy() log_history = state.pop("log_history", None) log_history1 = state1.pop("log_history", None) self.assertEqual(state, state1) skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"] for log, log1 in zip(log_history, log_history1): for key in skip_log_keys: _ = log.pop(key, None) _ = log1.pop(key, None) self.assertEqual(log, log1) def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True): if load_safe: loader = safetensors.torch.load_file weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME) else: loader = torch.load weights_file = os.path.join(folder, WEIGHTS_NAME) if save_safe: extension = "safetensors" saver = safetensors.torch.save_file index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) shard_name = SAFE_WEIGHTS_NAME else: extension = "bin" saver = torch.save index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) shard_name = WEIGHTS_NAME state_dict = loader(weights_file) os.remove(weights_file) keys = list(state_dict.keys()) shard_files = [ shard_name.replace(f".{extension}", f"-{idx+1:05d}-of-{len(keys):05d}.{extension}") for idx in range(len(keys)) ] index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}} with open(index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) for param_name, shard_file in zip(keys, shard_files): saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file)) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size trainer = 
get_regression_trainer(learning_rate=0.1) trainer.train() self.default_trained_model = (trainer.model.a, trainer.model.b) trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.alternate_trained_model = (trainer.model.a, trainer.model.b) def check_trained_model(self, model, alternate_seed=False): (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model self.assertTrue(torch.allclose(model.a, a)) self.assertTrue(torch.allclose(model.b, b)) def test_reproducible_training(self): trainer = get_regression_trainer(learning_rate=0.1) trainer.train() self.check_trained_model(trainer.model) trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_trainer_with_datasets(self): import datasets np.random.seed(42) x = np.random.normal(size=(64,)).astype(np.float32) y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y}) model = RegressionModel() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) train_dataset.set_format(type="torch", dtype=torch.float32) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) z = np.random.normal(size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z}) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) def test_model_init(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel()) trainer.train() self.check_trained_model(trainer.model) trainer.train() self.check_trained_model(trainer.model) trainer.args.seed = 314 trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_gradient_accumulation(self): trainer = get_regression_trainer( gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1 ) trainer.train() self.check_trained_model(trainer.model) def test_gradient_checkpointing(self): trainer = get_regression_trainer( per_device_train_batch_size=1, learning_rate=0.1, gradient_checkpointing=True, gradient_checkpointing_kwargs={"use_reentrant": False}, ) previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()} trainer.train() for k, v in trainer.model.named_parameters(): self.assertFalse( torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4), f"Model weights for {k} have not been updated", ) def test_training_loss(self): n_gpus = max(1, backend_device_count(torch_device)) trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus)) trainer.train() log_history = trainer.state.log_history losses = [log["loss"] for log in log_history if "loss" in log] train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4) trainer = get_regression_trainer(logging_steps=5) trainer.train() log_history = trainer.state.log_history new_train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(train_loss, new_train_loss, places=4) def test_custom_optimizer(self): train_dataset = RegressionDataset() args = 
TrainingArguments("./regression") model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0) def test_lr_scheduler_kwargs(self): train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"power": 5.0, "lr_end": 1e-5} args = TrainingArguments( "./regression", lr_scheduler_type="polynomial", lr_scheduler_kwargs=extra_kwargs, learning_rate=0.2, warmup_steps=num_warmup_steps, ) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) self.assertIsNotNone(trainer.lr_scheduler) sched1 = trainer.lr_scheduler sched2 = get_polynomial_decay_schedule_with_warmup( trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs ) self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args) self.assertEqual(sched1.lr_lambdas[0].keywords, sched2.lr_lambdas[0].keywords) def test_reduce_lr_on_plateau_args(self): train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", evaluation_strategy="epoch", metric_for_best_model="eval_loss", ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler) ) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) self.assertEqual(trainer.lr_scheduler.factor, 0.2) self.assertEqual(trainer.lr_scheduler.patience, 5) self.assertEqual(trainer.lr_scheduler.cooldown, 2) def test_reduce_lr_on_plateau(self): class TrainerWithLRLogs(Trainer): def log(self, logs): if hasattr(self.lr_scheduler, "_last_lr"): logs["learning_rate"] = self.lr_scheduler._last_lr[0] super().log(logs) train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", lr_scheduler_type="reduce_lr_on_plateau", evaluation_strategy="epoch", metric_for_best_model="eval_loss", num_train_epochs=10, learning_rate=0.2, ) model = RegressionModel() trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) patience = trainer.lr_scheduler.patience logs = trainer.state.log_history[1:] best_loss = logs[0]["eval_loss"] bad_epochs = 0 for i, log in enumerate(logs[:-1]): loss = log["eval_loss"] just_decreased = False if loss > best_loss: bad_epochs += 1 if bad_epochs > patience: self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"]) just_decreased = True bad_epochs = 0 else: best_loss = loss bad_epochs = 0 if not just_decreased: self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"]) def test_adafactor_lr_none(self): from transformers.optimization import Adafactor, AdafactorSchedule train_dataset = RegressionDataset() args = 
TrainingArguments("./regression") model = RegressionModel() optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0) @require_torch_accelerator @require_torch_bf16 def test_mixed_bf16(self): trainer = get_regression_trainer(learning_rate=0.1, bf16=True) trainer.train() self.check_trained_model(trainer.model) with self.assertRaises(ValueError): trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex") @require_torch_gpu @require_torch_tf32 def test_tf32(self): trainer = get_regression_trainer(learning_rate=0.1, tf32=True) trainer.train() self.check_trained_model(trainer.model) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_trainer_works_with_dict(self): train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args = TrainingArguments("./regression") trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_evaluation_with_keys_to_drop(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) eval_dataset = RepeatDataset(x) args = TrainingArguments("./test") trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset) result = trainer.predict(eval_dataset) self.assertTrue(isinstance(result.predictions, np.ndarray)) result = trainer.predict(eval_dataset, ignore_keys=[]) self.assertTrue(isinstance(result.predictions, tuple)) self.assertEqual(len(result.predictions), 2) def test_training_arguments_are_left_untouched(self): trainer = get_regression_trainer() trainer.train() args = TrainingArguments("./regression", report_to=[]) dict1, dict2 = args.to_dict(), trainer.args.to_dict() for key in dict1.keys(): if key != "logging_dir": self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): trainer = get_regression_trainer(learning_rate=0.1) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size) trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size)) trainer = get_regression_trainer(learning_rate=0.1, max_steps=10) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @require_torch_bf16 @require_intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, use_cpu=True) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size) trainer = get_regression_trainer( learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, 
use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size)) trainer = get_regression_trainer( learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) def test_neftune(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments( "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.model = trainer._activate_neftune(trainer.model) dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device) emb1 = trainer.model.get_input_embeddings()(dummy_input) emb2 = trainer.model.get_input_embeddings()(dummy_input) self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!") tiny_gpt2 = GPT2LMHeadModel(config) args = TrainingArguments( "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() _ = trainer.model(dummy_input) self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0) trainer.model.eval() emb1 = trainer.model.get_input_embeddings()(dummy_input) emb2 = trainer.model.get_input_embeddings()(dummy_input) self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!") def test_logging_inf_nan_filter(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_no_filter = trainer.state.log_history args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_filter = trainer.state.log_history def is_any_loss_nan_or_inf(log_history): losses = [l["loss"] for l in log_history[:-1]] return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses) self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter)) self.assertFalse(is_any_loss_nan_or_inf(log_history_filter)) def test_train_and_eval_dataloaders(self): n_gpu = max(1, backend_device_count(torch_device)) trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16) self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu) trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu) trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32 ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1) self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1) trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32, dataloader_drop_last=True, ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu)) 
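# Aside (hedged illustration of the arithmetic asserted around here, assuming a single device):
# without dataloader_drop_last a final partial batch is kept, with it the partial batch is dropped.
train_len_demo, total_batch_size_demo = 66, 16
keep_last = train_len_demo // total_batch_size_demo + (1 if train_len_demo % total_batch_size_demo else 0)
drop_last = train_len_demo // total_batch_size_demo
print(keep_last, drop_last)  # 5 batches when keeping the partial batch vs. 4 when dropping it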
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu)) new_eval_dataset = RegressionDataset(length=128) self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu)) def test_dataloader_without_dataset(self): train_dataset = RegressionDataset(length=128) trainer = CustomDataloaderTrainer( model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset ) trainer.train() trainer.evaluate() @require_torch_multi_accelerator def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() model.is_parallelizable = True model.model_parallel = True args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16) trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset()) self.assertTrue(trainer.is_model_parallel) self.assertEqual(trainer.args.n_gpu, 1) self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16) def test_evaluate(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_evaluate_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, 
labels: logits + 1, jit_mode_eval=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) @require_torch_bf16 @require_intel_extension_for_pytorch def test_evaluate_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, eval_len=66, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, bf16=mix_bf16, use_cpu=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict(self): trainer = get_regression_trainer(a=1.5, b=2.5) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"]) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x 
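# Aside (hedged illustration only): AlmostAccuracy, defined earlier in this file, counts a prediction
# as correct when it lies within `thresh` of its label; a tiny standalone check of that metric:
import numpy as np

preds_demo = np.array([1.0, 2.0, 3.0], dtype=np.float32)
labels_demo = np.array([1.1, 2.5, 3.0], dtype=np.float32)
thresh_demo = 0.25
accuracy_demo = (np.abs(preds_demo - labels_demo) <= thresh_demo).astype(np.float32).mean().item()
print(accuracy_demo)  # 2 of 3 predictions fall within the threshold -> ~0.667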
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) @require_torch_bf16 @require_intel_extension_for_pytorch def test_predict_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, use_cpu=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, use_cpu=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, use_cpu=True ) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], use_ipex=True, bf16=mix_bf16, use_cpu=True, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_dynamic_shapes(self): eval_dataset = DynamicShapesDataset(batch_size=self.batch_size) model = RegressionModel(a=2, b=1) args = TrainingArguments("./regression") trainer = Trainer(model, args, eval_dataset=eval_dataset) _ = trainer.evaluate() preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) args = TrainingArguments("./regression", eval_accumulation_steps=2) trainer = Trainer(model, args, eval_dataset=eval_dataset) _ = trainer.evaluate() preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, 
seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) def test_log_level(self): logger = logging.get_logger() log_info_string = "Running training" is_info = logging.get_verbosity() <= 20 with CaptureLogger(logger) as cl: trainer = get_regression_trainer() trainer.train() if is_info: self.assertIn(log_info_string, cl.out) else: self.assertNotIn(log_info_string, cl.out) with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="debug") trainer.train() self.assertIn(log_info_string, cl.out) with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="error") trainer.train() self.assertNotIn(log_info_string, cl.out) def test_save_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size)) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False) @require_safetensors def test_safe_checkpoints(self): for save_safetensors in [True, False]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, save_safetensors=save_safetensors) trainer.train() self.check_saved_checkpoints( tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors ) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, save_steps=5, pretrained=False, save_safetensors=save_safetensors ) trainer.train() self.check_saved_checkpoints( tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors ) @require_torch_multi_accelerator def test_run_seq2seq_double_train_wrap_once(self): trainer = get_regression_trainer() trainer.train() model_wrapped_before = trainer.model_wrapped trainer.train() model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice") @require_torch_up_to_2_accelerators def test_can_resume_training(self): with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "logging_steps": 5, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) checkpoint = os.path.join(tmpdir, "checkpoint-15") trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "pretrained": False, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = 
dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) checkpoint = os.path.join(tmpdir, "checkpoint-15") trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) trainer = get_regression_trainer() with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") self.assertTrue("Can't find a valid checkpoint at" in str(context.exception)) output_dir2 = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=output_dir2) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) def test_resume_training_with_randomness(self): random_torch = not torch.cuda.is_available() or torch.cuda.device_count() <= 1 if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() with self.subTest("Test every step"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15")) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) with self.subTest("Test every epoch"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")] self.assertEqual(len(checkpoints), 3) checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0] trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir)) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) @slow @require_accelerate @require_torch_non_multi_accelerator def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True SRC_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "examples", 
"pytorch", "text-classification") ) sys.path.append(SRC_DIR) import run_glue with tempfile.TemporaryDirectory() as tmpdir: testargs = f.split() with self.assertRaises(RuntimeError): with patch.object(sys, "argv", testargs): run_glue.main() testargs[-1] = "1" with patch.object(sys, "argv", testargs): run_glue.main() def test_training_with_resume_from_checkpoint_false(self): train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=False) @require_torch_up_to_2_accelerators def test_resume_training_with_shard_checkpoint(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint) trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_safetensors @require_torch_up_to_2_accelerators def test_resume_training_with_safe_checkpoint(self): for initial_safe in [False, True]: for loaded_safe in [False, True]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=initial_safe, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe) trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def test_resume_training_with_gradient_accumulation(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def 
test_resume_training_with_frozen_params(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train(resume_from_checkpoint=checkpoint) self.assertFalse(trainer.model.a.requires_grad) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_load_best_model_at_end(self): total = int(self.n_epochs * 64 / self.batch_size) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss") with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, evaluation_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total) self.check_best_model_has_been_loaded( tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True ) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, pretrained=False, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False) @require_safetensors def test_load_best_model_from_safetensors(self): total = int(self.n_epochs * 64 / self.batch_size) for save_safetensors, pretrained in product([False, True], [False, True]): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy="steps", save_steps=5, load_best_model_at_end=True, save_safetensors=save_safetensors, pretrained=pretrained, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors) 
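# Aside (hedged illustration only): the safe_weights branches exercised around here come down to
# writing and reading a plain state dict with safetensors instead of torch.save / torch.load:
import os
import tempfile

import torch
import safetensors.torch

demo_state = {"a": torch.tensor(1.5), "b": torch.tensor(2.5)}
with tempfile.TemporaryDirectory() as demo_dir:
    demo_path = os.path.join(demo_dir, "model.safetensors")
    safetensors.torch.save_file(demo_state, demo_path)
    reloaded = safetensors.torch.load_file(demo_path)
print(torch.equal(demo_state["a"], reloaded["a"]))  # True: the round trip preserves the tensors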
self.check_best_model_has_been_loaded( tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors ) @slow def test_trainer_eval_mrpc(self): MODEL_ID = "bert-base-cased-finetuned-mrpc" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID) data_args = GlueDataTrainingArguments( task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True ) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") training_args = TrainingArguments(output_dir="./examples", use_cpu=True) trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result["eval_loss"], 0.2) @slow def test_trainer_eval_lm(self): MODEL_ID = "distilroberta-base" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) self.assertEqual(len(dataset), 31) def test_training_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) train_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples", max_steps=4) trainer = Trainer(model=model, args=args, train_dataset=train_dataset) trainer.train() self.assertEqual(trainer.state.global_step, 4) loader = trainer.get_train_dataloader() self.assertIsInstance(loader, torch.utils.data.DataLoader) self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler) def test_evaluation_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples") trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) eval_dataset = SampleIterableDataset(length=66) results = trainer.evaluate(eval_dataset) x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() args = RegressionTrainingArguments(output_dir="./examples") trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) preds = trainer.predict(trainer.eval_dataset).predictions x = eval_dataset.dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"]) preds = trainer.predict(test_dataset).predictions x = test_dataset.dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) def test_num_train_epochs_in_training(self): trainer = get_regression_trainer( max_steps=3, train_len=64, per_device_train_batch_size=16, 
gradient_accumulation_steps=5 ) train_output = trainer.train() self.assertEqual(train_output.global_step, 3) trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(self.n_epochs)) def test_early_stopping_callback(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=True, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertLess(train_output.global_step, 20 * 64 / 16) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, evaluation_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1)) self.assertEqual(trainer.state.global_step, 0) try: trainer.train() except AssertionError: self.assertEqual(trainer.state.global_step, 0) def test_flos_extraction(self): trainer = get_regression_trainer(learning_rate=0.1) def assert_flos_extraction(trainer, wrapped_model_to_check): self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check)) self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0) assert_flos_extraction(trainer, trainer.model) assert_flos_extraction(trainer, nn.DataParallel(trainer.model)) trainer.train() self.assertTrue(isinstance(trainer.state.total_flos, float)) def check_checkpoint_deletion(self, trainer, output_dir, expected): for n in [5, 10, 15, 20, 25]: os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True) trainer._rotate_checkpoints(output_dir=output_dir) glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")] values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints] self.assertSetEqual(set(values), set(expected)) def test_checkpoint_rotation(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2) self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25]) trainer = get_regression_trainer( output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=2 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) trainer = get_regression_trainer( output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=1 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25") self.check_checkpoint_deletion(trainer, tmp_dir, [25]) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) def check_mem_metrics(self, trainer, check_func): metrics = trainer.train().metrics check_func("init_mem_cpu_alloc_delta", metrics) check_func("train_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("init_mem_gpu_alloc_delta", metrics) check_func("train_mem_gpu_alloc_delta", metrics) metrics = trainer.evaluate() 
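# Aside (hedged illustration only): the early-stopping test above relies on EarlyStoppingCallback,
# which stops training once the monitored metric fails to improve by at least
# early_stopping_threshold for early_stopping_patience consecutive evaluations.
from transformers import EarlyStoppingCallback

demo_callback = EarlyStoppingCallback(early_stopping_patience=1, early_stopping_threshold=0.0001)
# With a Trainer configured to evaluate and save each epoch and metric_for_best_model set,
# the callback would typically be registered via trainer.add_callback(demo_callback).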
check_func("eval_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("eval_mem_gpu_alloc_delta", metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func("test_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("test_mem_gpu_alloc_delta", metrics) def test_mem_metrics(self): trainer = get_regression_trainer(skip_memory_metrics=False) self.check_mem_metrics(trainer, self.assertIn) trainer = get_regression_trainer(skip_memory_metrics=True) self.check_mem_metrics(trainer, self.assertNotIn) @require_torch_accelerator def test_fp16_full_eval(self): debug = 0 n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") self.assertGreater(fp32_init, 59_000) self.assertLess(fp32_eval, 5_000) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() fp16_init = metrics["init_mem_gpu_alloc_delta"] fp16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp16_init {fp16_init}") print(f"fp16_eval {fp16_eval}") self.assertLess(fp16_init, 5_000) self.assertGreater(fp16_eval, 27_000) self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000) @require_torch_non_multi_gpu @require_torchdynamo @require_torch_tensorrt_fx def test_torchdynamo_full_eval(self): import torchdynamo n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len) metrics = trainer.evaluate() original_eval_loss = metrics["eval_loss"] del trainer trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="eager") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) del trainer torchdynamo.reset() trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() @unittest.skip("torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.") @require_torch_non_multi_gpu @require_torchdynamo def test_torchdynamo_memory(self): import torchdynamo class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): x = inputs["x"] output = model(x) if self.args.n_gpu == 1: return output.mean() return output class MyModule(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): for _ in range(20): x = torch.cos(x) return x mod = MyModule() a = torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None trainer = CustomTrainer(model=mod) for _ in range(10): orig_loss = trainer.training_step(mod, {"x": a}) gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() orig_loss = trainer.training_step(mod, {"x": a}) orig_peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer a = 
torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None args = TrainingArguments(output_dir="None", torchdynamo="nvfuser") trainer = CustomTrainer(model=mod, args=args) for _ in range(10): loss = trainer.training_step(mod, {"x": a}) gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() loss = trainer.training_step(mod, {"x": a}) peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer self.assertAlmostEqual(loss, orig_loss) self.assertGreater(orig_peak_mem, peak_mem * 2) @require_torch_accelerator @require_torch_bf16 def test_bf16_full_eval(self): debug = 0 n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") self.assertGreater(fp32_init, 59_000) self.assertLess(fp32_eval, 5_000) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() bf16_init = metrics["init_mem_gpu_alloc_delta"] bf16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"bf16_init {bf16_init}") print(f"bf16_eval {bf16_eval}") self.assertLess(bf16_init, 5_000) self.assertGreater(bf16_eval, 27_000) self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000) def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) trainer = Trainer(model=model) trainer.create_optimizer_and_scheduler(10) wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] wd_params = [p for n, p in model.named_parameters() if n in wd_names] no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names] self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params) self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params) @slow @require_torch_multi_accelerator def test_end_to_end_example(self): script_path = os.path.abspath( os.path.join( os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py" ) ) with tempfile.TemporaryDirectory() as tmpdir: command = [ "accelerate", "launch", script_path, "--model_name_or_path", "t5-small", "--per_device_train_batch_size", "1", "--output_dir", tmpdir, "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", "--num_train_epochs", "1", "--dataset_name", "wmt16", "--dataset_config", "ro-en", "--source_lang", "en", "--target_lang", "ro", "--do_predict", "--max_predict_samples", "64", "--predict_with_generate", "--ddp_timeout", "60", ] execute_subprocess_async(command) @require_torch @is_staging_test class TrainerIntegrationWithHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): for model in ["test-trainer", "test-trainer-epoch", "test-trainer-step", "test-trainer-tensorboard"]: try: delete_repo(token=cls._token, repo_id=model) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-trainer-org") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = 
get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer"), push_to_hub=True, hub_token=self._token, ) url = trainer.push_to_hub() re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/test-trainer") model = RegressionPreTrainedModel.from_pretrained(repo_name) self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir) trainer.save_model() trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-org"), push_to_hub=True, hub_model_id="valid_org/test-trainer-org", hub_token=self._token, ) url = trainer.push_to_hub() re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, "valid_org/test-trainer-org") model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org") self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def get_commit_history(self, repo): commit_logs = subprocess.run( "git log".split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding="utf-8", cwd=repo, ).stdout commits = commit_logs.split("\n\n")[1::2] return [commit.strip() for commit in commits] def test_push_to_hub_with_saves_each_epoch(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-epoch"), push_to_hub=True, hub_token=self._token, hub_always_push=True, save_strategy="epoch", ) trainer.train() commits = list_repo_commits(f"{USER}/test-trainer-epoch", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) for i in range(1, 4): self.assertIn(f"Training in progress, epoch {i}", commits) def test_push_to_hub_with_saves_each_n_steps(self): num_gpus = max(1, backend_device_count(torch_device)) if num_gpus > 2: return with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-step"), push_to_hub=True, hub_token=self._token, hub_always_push=True, save_strategy="steps", save_steps=5, ) trainer.train() commits = list_repo_commits(f"{USER}/test-trainer-step", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader())) for i in range(5, max_steps, 5): self.assertIn(f"Training in progress, step {i}", commits) @require_tensorboard def test_push_to_hub_with_tensorboard_logs(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-tensorboard"), hub_token=self._token, save_strategy="epoch", report_to=["tensorboard"], keep_report_to=True, ) trainer.train() trainer.push_to_hub() files = list_repo_files(f"{USER}/test-trainer-tensorboard", token=self._token) found_log = False for f in files: if len(f.split("runs")) > 1 and "events.out.tfevents" in f: found_log = True assert found_log is True, "No tensorboard log found in repo" @require_torch @require_optuna class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = 
args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4) @require_torch @require_optuna class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) def compute_objective(metrics: Dict[str, float]) -> List[float]: return metrics["eval_loss"], metrics["eval_accuracy"] with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=10, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, compute_metrics=AlmostAccuracy(), ) trainer.hyperparameter_search( direction=["minimize", "maximize"], hp_space=hp_space, hp_name=hp_name, n_trials=4, compute_objective=compute_objective, ) @require_torch @require_ray class TrainerHyperParameterRayIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def ray_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): from ray import tune return { "a": tune.randint(-4, 4), "b": tune.randint(-4, 4), } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4 ) def test_hyperparameter_search(self): self.ray_hyperparameter_search() def 
test_hyperparameter_search_ray_client(self): import ray from ray.util.client.ray_client_helpers import ray_start_client_server with ray_start_client_server(): assert ray.util.client.ray.is_connected() self.ray_hyperparameter_search() @slow @require_torch @require_sigopt class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return [ {"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"}, {"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"}, ] def model_init(trial): if trial is not None: a = trial.assignments["a"] b = trial.assignments["b"] else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.assignments) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4 ) optim_test_params = [] if is_torch_available(): default_adam_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "eps": TrainingArguments.adam_epsilon, "lr": TrainingArguments.learning_rate, } default_lion_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "lr": TrainingArguments.learning_rate, } default_anyprecision_kwargs = { "use_kahan_summation": False, "momentum_dtype": torch.float32, "variance_dtype": torch.float32, "compensation_buffer_dtype": torch.bfloat16, } optim_test_params = [ ( TrainingArguments(optim=OptimizerNames.ADAMW_HF, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_HF.value, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_TORCH, output_dir="None"), torch.optim.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAFACTOR, output_dir="None"), transformers.optimization.Adafactor, { "scale_parameter": False, "relative_step": False, "lr": TrainingArguments.learning_rate, }, ), ] if is_apex_available(): import apex optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), apex.optimizers.FusedAdam, default_adam_kwargs, ) ) if is_bitsandbytes_available(): import bitsandbytes as bnb optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), 
bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) if is_torchdistx_available(): import torchdistx optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), torchdistx.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) ) @require_torch class TrainerOptimizerChoiceTest(unittest.TestCase): def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs): actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args) self.assertEqual(expected_cls, actual_cls) self.assertIsNotNone(optim_kwargs) for p, v in expected_kwargs.items(): self.assertTrue(p in optim_kwargs) actual_v = optim_kwargs[p] self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.") @parameterized.expand(optim_test_params, skip_on_empty=True) def test_optim_supported(self, training_args: TrainingArguments, expected_cls, expected_kwargs): self.check_optim_and_kwargs(training_args, expected_cls, expected_kwargs) trainer = get_regression_trainer(**training_args.to_dict()) trainer.train() def test_fused_adam(self): mock = Mock() modules = { "apex": mock, "apex.optimizers": mock.optimizers, "apex.optimizers.FusedAdam": mock.optimizers.FusedAdam, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), mock.optimizers.FusedAdam, default_adam_kwargs, ) def test_fused_adam_no_apex(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None") with patch.dict("sys.modules", {"apex.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_adam8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit_alias(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": 
mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None") with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None") with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None") with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None") with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None") with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_anyprecision_adamw(self): mock = Mock() modules = { "torchdistx": mock, "torchdistx.optimizers": mock.optimizers, "torchdistx.optimizers.AnyPrecisionAdamW.": mock.optimizers.AnyPrecisionAdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), mock.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) def test_no_torchdistx_anyprecision_adamw(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None") with patch.dict("sys.modules", {"torchdistx.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) @require_torch @require_wandb class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): 
DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return { "method": "random", "metric": {}, "parameters": { "a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "b": {"distribution": "int_uniform", "min": 1, "max": 6}, }, } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="wandb", n_trials=4, anonymous="must" ) class HyperParameterSearchBackendsTest(unittest.TestCase): def test_hyperparameter_search_backends(self): self.assertEqual( list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()), list(HPSearchBackend), )
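A minimal sketch, not part of the test suite, of the hyperparameter-search pattern the Optuna/Ray/SigOpt/W&B test classes above all follow: a model_init that builds a fresh model per trial plus an hp_space callable. It reuses the regression helpers defined earlier in this file (RegressionPreTrainedModel, RegressionModelConfig, RegressionDataset) and assumes optuna is installed; the function name is hypothetical.

import tempfile

from transformers import Trainer, TrainingArguments


def sketch_hyperparameter_search(n_trials=2):
    def model_init(trial):
        # A fresh model has to be created for every trial.
        a = 0 if trial is None else trial.suggest_int("a", -4, 4)
        b = 0 if trial is None else trial.suggest_int("b", -4, 4)
        return RegressionPreTrainedModel(RegressionModelConfig(a=a, b=b, double_output=False))

    with tempfile.TemporaryDirectory() as tmp_dir:
        trainer = Trainer(
            model=None,  # the model comes from model_init, once per trial
            args=TrainingArguments(output_dir=tmp_dir, disable_tqdm=True, report_to=[]),
            train_dataset=RegressionDataset(length=64),
            eval_dataset=RegressionDataset(length=64),
            model_init=model_init,
        )
        # Returning {} from hp_space defers the parameter sampling to model_init, as in the tests above.
        return trainer.hyperparameter_search(direction="minimize", hp_space=lambda trial: {}, n_trials=n_trials)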
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); you may not use this file except in compliance with the License. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Notes from the callback tests below: disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging, so the tests make sure it is set to False because later tests depend on its value. Callback order doesn't matter. Callbacks passed at init are added to the default callbacks, and TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used. Callbacks can be added, popped, or removed by class name, and also by instance. For now, scatter_gather warnings are ignored in the event-flow test since they are not relevant to what is being tested. The event-flow test covers independent log, save, and eval settings, then a bit of everything, and checks that a warning is emitted for duplicated callbacks.
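A minimal sketch, assuming a trainer built as in get_trainer() below, of the callback-management API these tests cover: callbacks can be added, popped, or removed either by class or by instance. EventRecorder and manage_callbacks are hypothetical names used only for illustration.

from transformers import PrinterCallback, ProgressCallback, Trainer, TrainerCallback


class EventRecorder(TrainerCallback):
    # Hypothetical helper that records the events it receives, like MyTestTrainerCallback below.
    def __init__(self):
        self.events = []

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")


def manage_callbacks(trainer: Trainer) -> EventRecorder:
    recorder = EventRecorder()
    trainer.add_callback(recorder)              # add by instance
    trainer.remove_callback(ProgressCallback)   # remove by class
    trainer.add_callback(PrinterCallback)       # add by class
    return trainer.pop_callback(recorder)       # pop returns the same instance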
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class MyTestTrainerCallback(TrainerCallback): "A callback that registers the events that goes through." def __init__(self): self.events = [] def on_init_end(self, args, state, control, **kwargs): self.events.append("on_init_end") def on_train_begin(self, args, state, control, **kwargs): self.events.append("on_train_begin") def on_train_end(self, args, state, control, **kwargs): self.events.append("on_train_end") def on_epoch_begin(self, args, state, control, **kwargs): self.events.append("on_epoch_begin") def on_epoch_end(self, args, state, control, **kwargs): self.events.append("on_epoch_end") def on_step_begin(self, args, state, control, **kwargs): self.events.append("on_step_begin") def on_step_end(self, args, state, control, **kwargs): self.events.append("on_step_end") def on_evaluate(self, args, state, control, **kwargs): self.events.append("on_evaluate") def on_predict(self, args, state, control, **kwargs): self.events.append("on_predict") def on_save(self, args, state, control, **kwargs): self.events.append("on_save") def on_log(self, args, state, control, **kwargs): self.events.append("on_log") def on_prediction_step(self, args, state, control, **kwargs): self.events.append("on_prediction_step") @require_torch class TrainerCallbackTest(unittest.TestCase): def setUp(self): self.output_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.output_dir) def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs): train_dataset = RegressionDataset(length=train_len) eval_dataset = RegressionDataset(length=eval_len) config = RegressionModelConfig(a=a, b=b) model = RegressionPreTrainedModel(config) args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs) return Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, ) def check_callbacks_equality(self, cbs1, cbs2): self.assertEqual(len(cbs1), len(cbs2)) cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) for cb1, cb2 in zip(cbs1, cbs2): if isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1, cb2) elif isinstance(cb1, type) and not isinstance(cb2, type): self.assertEqual(cb1, cb2.__class__) elif not isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1.__class__, cb2) else: self.assertEqual(cb1, cb2) def get_expected_events(self, trainer): expected_events = ["on_init_end", "on_train_begin"] step = 0 train_dl_len = len(trainer.get_eval_dataloader()) evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs): expected_events.append("on_epoch_begin") for _ in range(train_dl_len): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and 
step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save") expected_events.append("on_epoch_end") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def test_init_callback(self): trainer = self.get_trainer() expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(MyTestTrainerCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer(disable_tqdm=True) expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_add_remove_callback(self): expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] trainer = self.get_trainer() trainer.remove_callback(DefaultFlowCallback) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb = trainer.pop_callback(DefaultFlowCallback) self.assertEqual(cb.__class__, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(DefaultFlowCallback) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb = trainer.callback_handler.callbacks[0] trainer.remove_callback(cb) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb1 = trainer.callback_handler.callbacks[0] cb2 = trainer.pop_callback(cb1) self.assertEqual(cb1, cb2) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(cb1) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_event_flow(self): import warnings warnings.simplefilter(action="ignore", category=UserWarning) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer( callbacks=[MyTestTrainerCallback], logging_steps=3, 
save_steps=10, eval_steps=5, evaluation_strategy="steps", ) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) with patch("transformers.trainer_callback.logger.warning") as warn_mock: trainer = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], ) assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
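A simplified, hypothetical rendering of the event order that get_expected_events() above computes, restricted to a single epoch with no evaluation or checkpointing; the real helper also interleaves on_evaluate and on_save events depending on the configured strategies.

def expected_events_one_epoch(num_steps=4, logging_steps=2):
    events = ["on_init_end", "on_train_begin", "on_epoch_begin"]
    for step in range(1, num_steps + 1):
        events += ["on_step_begin", "on_step_end"]
        if step % logging_steps == 0:
            events.append("on_log")
    # After the last epoch the Trainer logs once more and fires on_train_end.
    events += ["on_epoch_end", "on_log", "on_train_end"]
    return events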
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); you may not use this file except in compliance with the License. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Notes from the distributed trainer tests below: the dummy model adds some unused params, otherwise DDP will complain. Each launcher test builds a torchrun command of the form `--nproc_per_node=<N> --master_port=<get_torch_dist_unique_port()> <test_file_dir>/test_trainer_distributed.py --output_dir <tmp_dir>` (with N equal to 2 for the NeuronCore and NPU variants, torch.cuda.device_count() for the multi-GPU variant, and torch.xpu.device_count() for the XPU variant) and runs it with execute_subprocess_async(cmd, env=self.get_env()); a successful return here is the success condition, since any error would have caused an error in the sub call. The script at the bottom of the file is meant to be run under torch.distributed on a machine with multiple GPUs, e.g. `PYTHONPATH=src python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir tests/test_trainer_distributed.py`. Essentially, what we want to verify in the distributed case is that we get all samples back, in the right order; this is crucial for prediction, for instance. The script also checks that dispatch_batches=False will work on a finite iterable dataset.
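A hedged sketch, mirroring the launcher pattern described above and used by the test classes below, of how this file is executed under torch.distributed: torchrun spawns one process per device and every process runs the __main__ block at the bottom of the file. The function name is hypothetical.

from transformers.testing_utils import execute_subprocess_async, get_torch_dist_unique_port


def launch_distributed(test_file: str, output_dir: str, nproc_per_node: int = 2):
    # torchrun takes its own flags first, then the target script and the script's arguments.
    distributed_args = f"--nproc_per_node={nproc_per_node} --master_port={get_torch_dist_unique_port()}".split()
    cmd = ["torchrun"] + distributed_args + [test_file, "--output_dir", output_dir]
    execute_subprocess_async(cmd)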
from typing import Dict import numpy as np from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_multi_xpu, require_torch_neuroncore, require_torch_npu, ) from transformers.training_args import ParallelMode from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset, IterableDataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result class TestTrainerDistributedNeuronCore(TestCasePlus): @require_torch_neuroncore def test_trainer(self): distributed_args = f.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) class TestTrainerDistributedNPU(TestCasePlus): @require_torch_npu def test_trainer(self): distributed_args = f.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) class TestTrainerDistributed(TestCasePlus): 
@require_torch_multi_gpu def test_trainer(self): distributed_args = f.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) @require_torch_multi_xpu class TestTrainerDistributedXPU(TestCasePlus): def test_trainer(self): distributed_args = f.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) if __name__ == "__main__": parser = HfArgumentParser((TrainingArguments,)) training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) for dataset_length in [101, 40, 7]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None train_dataset = FiniteIterableDataset(label_names=["labels", "extra"], length=1) model = RegressionModel() training_args.per_device_train_batch_size = 1 training_args.max_steps = 1 training_args.dispatch_batches = False trainer = Trainer(model, training_args, train_dataset=train_dataset) trainer.train()
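A condensed sketch of the order-preservation check performed in the __main__ block above: because DummyModel echoes its inputs, the gathered predictions and labels must equal range(len(dataset)) once evaluation has collected results from every rank. The function name is hypothetical; compute_metrics above wraps the same comparison into a {"success": ...} metric.

def predictions_in_order(predictions, label_ids, dataset_length: int) -> bool:
    # Both arrays must come back as the original, sequential sample indices.
    sequential = list(range(dataset_length))
    return predictions.tolist() == sequential and label_ids.tolist() == sequential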
Copyright 2020 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); you may not use this file except in compliance with the License. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Notes from the Seq2SeqTrainer tests below: the tokenizer will automatically set [BOS] <text> [EOS], and all unnecessary tokens are removed. The train dataset is mapped to encoder-decoder inputs, the same is done for the validation dataset, then the trainer is instantiated and training starts. The second test checks that the number of generated sequences is correct when num_return_sequences > 1, essentially ensuring that accelerator.gather is used instead of gather_for_metrics; pairs where at least one record is None are removed, and the GSM8K train split is truncated to the first 38 examples.
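A minimal sketch of the label preparation used by the fine-tuning test below: the target token ids are copied into "labels" and every pad token id is replaced by -100 so that it is ignored by the cross-entropy loss. The helper name is hypothetical.

def mask_pad_tokens(labels, pad_token_id):
    # Replace padding positions with -100, the ignore index of the loss.
    return [[-100 if token == pad_token_id else token for token in seq] for seq in labels]


# e.g. mask_pad_tokens([[5, 7, 0, 0]], pad_token_id=0) == [[5, 7, -100, -100]]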
from transformers import ( AutoModelForSeq2SeqLM, BertTokenizer, DataCollatorForSeq2Seq, EncoderDecoderModel, GenerationConfig, Seq2SeqTrainer, Seq2SeqTrainingArguments, T5Tokenizer, ) from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class Seq2seqTrainerTester(TestCasePlus): @slow @require_torch def test_finetune_bert2bert(self): bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.max_length = 128 train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]") val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]") train_dataset = train_dataset.select(range(32)) val_dataset = val_dataset.select(range(16)) batch_size = 4 def _map_to_encoder_decoder_inputs(batch): inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["labels"] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] batch["decoder_attention_mask"] = outputs.attention_mask assert all(len(x) == 512 for x in inputs.input_ids) assert all(len(x) == 128 for x in outputs.input_ids) return batch def _compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str) return {"accuracy": accuracy} train_dataset = train_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) val_dataset = val_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) output_dir = self.get_auto_remove_tmp_dir() training_args = Seq2SeqTrainingArguments( output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, ) trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, ) trainer.train() @slow @require_torch def test_return_sequences(self): INPUT_COLUMN = "question" TARGET_COLUMN = "answer" MAX_INPUT_LENGTH = 256 MAX_TARGET_LENGTH = 256 dataset = datasets.load_dataset("gsm8k", "main", split="train[:38]") model = AutoModelForSeq2SeqLM.from_pretrained("t5-small") 
tokenizer = T5Tokenizer.from_pretrained("t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig.from_pretrained( "t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5 ) training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True) trainer = Seq2SeqTrainer( model=model, args=training_args, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) def prepare_data(examples): inputs = examples[INPUT_COLUMN] targets = examples[TARGET_COLUMN] model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True) labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN]) dataset_len = len(prepared_dataset) for num_return_sequences in range(3, 0, -1): gen_config.num_return_sequences = num_return_sequences metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config) assert ( metrics["eval_samples"] == dataset_len * num_return_sequences ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
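A hedged usage sketch of the generation-aware evaluation exercised above: varying num_return_sequences on the GenerationConfig and passing it to evaluate() multiplies the number of gathered prediction rows accordingly. The function name is hypothetical, max_new_tokens=32 is an arbitrary choice for this sketch, and num_beams must stay greater than or equal to num_return_sequences.

from transformers import GenerationConfig


def evaluate_with_k_sequences(trainer, dataset, k: int):
    gen_config = GenerationConfig(max_new_tokens=32, num_beams=5, num_return_sequences=k)
    metrics = trainer.evaluate(eval_dataset=dataset, generation_config=gen_config)
    # With the compute_metrics used above, metrics["eval_samples"] == len(dataset) * k.
    return metrics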
Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); you may not use this file except in compliance with the License. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Notes from the TPU trainer test below: this test is meant to be run on an instance with TPUs, like this: `python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py` (replace 8 with the number of TPU cores you have). The dummy model adds some unused params, otherwise DDP will complain. Essentially, what we want to verify in the distributed case is that we get all samples back, in the right order; this is crucial for prediction, for instance. The _mp_fn function at the bottom is the entry point for xla_spawn (TPUs).
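A minimal sketch of the xla_spawn entry-point convention the script below follows: examples/pytorch/xla_spawn.py imports the target module and calls _mp_fn(index) once per TPU core, so the module only needs a main() plus this thin wrapper. The main() body here is only a placeholder.

def main():
    # Build TrainingArguments and a Trainer here, then run the evaluate/predict checks
    # shown in the script below.
    pass


def _mp_fn(index):
    # `index` is the core/process index supplied by torch_xla's spawner.
    main()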
import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids def main(): parser = HfArgumentParser((TrainingArguments,)) sys.argv += ["--output_dir", "./examples"] training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, " f"tpu_num_cores: {training_args.tpu_num_cores}", ) for dataset_length in [1001, 256, 15]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None logger.info("🔥 All distributed tests successful") def _mp_fn(index): main() if __name__ == "__main__": main()
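The script above re-runs the same order checks with eval_accumulation_steps=2, which moves the accumulated prediction tensors from the device to the CPU every couple of prediction steps. A hedged sketch of toggling that on an existing trainer; the function name is hypothetical.

def evaluate_with_accumulation(trainer, steps: int = 2):
    trainer.args.eval_accumulation_steps = steps
    metrics = trainer.evaluate()
    trainer.args.eval_accumulation_steps = None  # restore the default behaviour
    return metrics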
Copyright 2018 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); you may not use this file except in compliance with the License. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Notes from the trainer-utils tests below: RandomIterableDataset is an iterable dataset of random length used for testing. The DistributedTensorGatherer tests simulate a result with a dataset of size 21, 4 processes, and chunks of lengths [2, 3, 1], with and without nested tensors; the different-shapes variant removes the extra samples added at the end to reach a round multiple of the number of processes, and also checks that it works when the varying seq_length comes second. The label-smoothing test is repeated with a few -100 labels, masking the log probs at the -100 positions. The group-by-length tests build inputs of random lengths and put one bigger than the others to check that it ends up in first position: the biggest element should be first, and the indices should be a permutation of range(100), or range(6) for the dict and BatchEncoding variants. The sampler and shard tests set seeds, sample with and without a batch_size passed, and set the seed of the base dataset to get the proper reference. All shards have a number of samples that is a round multiple of the batch size, all shards have the same number of samples, and all shards know the total number of samples; if drop_last is False, samples are looped through at the beginning to reach a size that is a round multiple of batch_size, and equivalence between IterableDatasetShard and ShardSampler is checked. The expected shards are built so that each process has batches of size 4 until there are not enough elements to form two full batches, so we stop at 96; when drop_last=False, the two last full batches are obtained by looping back to the beginning. There is also a test that pad_and_concatenate works with scalars (1.0 and 2.0 concatenate to [1.0, 2.0] for both the numpy and torch variants), and a test that RemoveColumnsCollator drops columns the model does not accept ("col3") while printing the logging message only once; a sketch of both appears after this block.
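A short sketch, reconstructed from the notes above, of the two behaviours they describe: numpy_pad_and_concatenate and torch_pad_and_concatenate on scalars, and RemoveColumnsCollator logging its removed-columns message only once. The function and class names are hypothetical, and the positional argument order of RemoveColumnsCollator is taken from the notes and should be treated as an assumption.

import numpy as np
import torch

from transformers.data.data_collator import default_data_collator
from transformers.trainer_pt_utils import numpy_pad_and_concatenate, torch_pad_and_concatenate
from transformers.trainer_utils import RemoveColumnsCollator


def pad_and_concatenate_scalars():
    # Scalars are promoted to 1-d arrays/tensors before being concatenated.
    assert np.array_equal(numpy_pad_and_concatenate(1.0, 2.0), np.array([1.0, 2.0]))
    assert torch.equal(torch_pad_and_concatenate(torch.tensor(1.0), torch.tensor(2.0)), torch.tensor([1.0, 2.0]))


class _CountingLogger:
    # Minimal stand-in for a logger; counts how many times info() is called.
    def __init__(self):
        self.called = 0
        self.last_msg = None

    def info(self, msg):
        self.called += 1
        self.last_msg = msg


def remove_columns_logs_once():
    batch = [{"col1": 1, "col2": 2, "col3": 3}, {"col1": 1, "col2": 2, "col3": 3}]
    logger = _CountingLogger()
    collator = RemoveColumnsCollator(default_data_collator, ["col1", "col2"], logger, "model", "training")
    assert "col3" not in collator(batch)  # unexpected columns are stripped before collation
    collator(batch)
    collator(batch)
    assert logger.called == 1             # the removal message is printed out only once
    assert "col3" in logger.last_msg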
import copy import unittest import numpy as np from transformers.data.data_collator import default_data_collator from transformers.testing_utils import require_accelerate, require_torch from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size from transformers.utils import is_torch_available if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset from transformers.modeling_outputs import SequenceClassifierOutput from transformers.tokenization_utils_base import BatchEncoding from transformers.trainer_pt_utils import ( DistributedLengthGroupedSampler, DistributedSamplerWithLoop, DistributedTensorGatherer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, ShardSampler, get_parameter_names, numpy_pad_and_concatenate, torch_pad_and_concatenate, ) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) class RandomIterableDataset(IterableDataset): def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length self.generator = torch.Generator() def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 number = torch.rand(1, generator=self.generator).item() stop = number < self.p_stop @require_torch class TrainerUtilsTest(unittest.TestCase): def test_distributed_tensor_gatherer(self): world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays(predictions[indices]) result = gatherer.finalize() self.assertTrue(np.array_equal(result, predictions)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]]) result = gatherer.finalize() self.assertTrue(isinstance(result, list)) self.assertEqual(len(result), 2) self.assertTrue(isinstance(result[1], list)) self.assertEqual(len(result[1]), 2) self.assertTrue(np.array_equal(result[0], predictions)) self.assertTrue(np.array_equal(result[1][0], predictions)) self.assertTrue(np.array_equal(result[1][1], predictions)) def test_distributed_tensor_gatherer_different_shapes(self): world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] sequence_lengths = [8, 10, 13] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays(predictions[indices, :seq_length]) result = gatherer.finalize() actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]] for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, 
:seq_length])) predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]]) result = gatherer.finalize() for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length])) self.assertTrue(np.array_equal(result[1], predictions)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]]) result = gatherer.finalize() self.assertTrue(np.array_equal(result[0], predictions)) for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length])) def test_label_smoothing(self): epsilon = 0.1 num_labels = 12 random_logits = torch.randn(4, 5, num_labels) random_labels = torch.randint(0, num_labels, (4, 5)) loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean() self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) random_labels[0, 1] = -100 random_labels[2, 1] = -100 random_labels[2, 3] = -100 loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) log_probs[0, 1] = 0.0 log_probs[2, 1] = 0.0 log_probs[2, 3] = 0.0 expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17) self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) def test_group_by_length(self): lengths = torch.randint(0, 25, (100,)).tolist() lengths[32] = 50 indices = list(LengthGroupedSampler(4, lengths=lengths)) self.assertEqual(lengths[indices[0]], 50) self.assertEqual(sorted(indices), list(range(100))) def test_group_by_length_with_dict(self): data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append({"input_ids": input_ids}) data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) self.assertEqual(len(data[indices[0]]["input_ids"]), 105) self.assertEqual(sorted(indices), list(range(6))) def test_group_by_length_with_batch_encoding(self): data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append(BatchEncoding({"input_ids": input_ids})) data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) self.assertEqual(len(data[indices[0]]["input_ids"]), 105) self.assertEqual(sorted(indices), list(range(6))) def test_distributed_length_grouped(self): lengths = torch.randint(0, 25, (100,)).tolist() lengths[32] = 50 indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths)) indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths)) 
self.assertEqual(lengths[indices_process_0[0]], 50) self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100))) def test_get_parameter_names(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) self.assertEqual( get_parameter_names(model, [nn.LayerNorm]), ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias'] ) def test_distributed_sampler_with_loop(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0) shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1) shard1.set_epoch(0) shard2.set_epoch(0) samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = [] for sample1, sample2 in zip(samples1, samples2): total += [sample1, sample2] self.assertEqual(set(total[:length]), set(dataset)) self.assertEqual(set(total[length:]), set(total[: (len(total) - length)])) def test_sequential_distributed_sampler(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1) samples1 = list(shard1) samples2 = list(shard2) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size) samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0): dataset.generator.manual_seed(epoch) reference = list(dataset) shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard in shards: shard.set_epoch(epoch) shard_lists = [list(shard) for shard in shards] for shard in shard_lists: self.assertTrue(len(shard) % batch_size == 0) self.assertEqual(len(shard), len(shard_lists[0])) for shard in shards: self.assertEqual(shard.num_examples, len(reference)) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) dataset.generator.manual_seed(epoch) reference = list(dataset) sampler_shards = [ ShardSampler( reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard, sampler_shard in zip(shard_lists, sampler_shards): self.assertListEqual(shard, list(sampler_shard)) def test_iterable_dataset_shard(self): dataset = RandomIterableDataset() 
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42) def test_iterable_dataset_shard_with_length(self): sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i) for i in range(2) ] expected_shards = [[], []] current_shard = 0 for i in range(0, 96, 4): expected_shards[current_shard].extend(list(range(i, i + 4))) current_shard = 1 - current_shard self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i) for i in range(2) ] expected_shards[0].extend(list(range(96, 100))) expected_shards[1].extend(list(range(0, 4))) self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2): shards = [ ShardSampler( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] shard_lists = [list(shard) for shard in shards] for shard in shard_lists: self.assertTrue(len(shard) % batch_size == 0) self.assertEqual(len(shard), len(shard_lists[0])) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] reference = copy.copy(dataset) if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) def test_shard_sampler(self): for n_elements in [64, 123]: dataset = list(range(n_elements)) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3) @require_accelerate def test_executable_batch_size(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) if batch_size > 16: raise RuntimeError("CUDA out of memory.") mock_training_loop_function() self.assertEqual(batch_sizes, [64, 32, 16]) @require_accelerate def test_executable_batch_size_no_search(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) mock_training_loop_function() self.assertEqual(batch_sizes, [64]) @require_accelerate def test_executable_batch_size_with_error(self): @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): raise RuntimeError("CUDA out of memory.") with self.assertRaises(RuntimeError) as cm: mock_training_loop_function() self.assertEqual("CUDA out of memory", cm.args[0]) def test_pad_and_concatenate_with_1d(self): array1 = 1.0 array2 = 
2.0 result = numpy_pad_and_concatenate(array1, array2) self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result)) tensor1 = torch.tensor(1.0) tensor2 = torch.tensor(2.0) result = torch_pad_and_concatenate(tensor1, tensor2) self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0]))) def test_remove_columns_collator(self): class MockLogger: def __init__(self) -> None: self.called = 0 def info(self, msg): self.called += 1 self.last_msg = msg data_batch = [ {"col1": 1, "col2": 2, "col3": 3}, {"col1": 1, "col2": 2, "col3": 3}, ] logger = MockLogger() remove_columns_collator = RemoveColumnsCollator( default_data_collator, ["col1", "col2"], logger, "model", "training" ) self.assertNotIn("col3", remove_columns_collator(data_batch)) remove_columns_collator(data_batch) remove_columns_collator(data_batch) self.assertEqual(logger.called, 1) self.assertIn("col3", logger.last_msg)
Copyright 2020 The HuggingFace team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class TestActivations(unittest.TestCase): def test_gelu_versions(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x))) self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x))) def test_gelu_10(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") gelu10 = get_activation("gelu_10") y_gelu = torch_builtin(x) y_gelu_10 = gelu10(x) clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0) self.assertTrue(torch.max(y_gelu_10).item() == 10.0) self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask)) def test_get_activation(self): get_activation("gelu") get_activation("gelu_10") get_activation("gelu_fast") get_activation("gelu_new") get_activation("gelu_python") get_activation("gelu_pytorch_tanh") get_activation("linear") get_activation("mish") get_activation("quick_gelu") get_activation("relu") get_activation("sigmoid") get_activation("silu") get_activation("swish") get_activation("tanh") with self.assertRaises(KeyError): get_activation("bogus") with self.assertRaises(KeyError): get_activation(None) def test_activations_are_distinct_objects(self): act1 = get_activation("gelu") act1.a = 1 act2 = get_activation("gelu") self.assertEqual(act1.a, 1) with self.assertRaises(AttributeError): _ = act2.a
Copyright 2020 The HuggingFace team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.
import unittest import numpy as np from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.activations_tf import get_tf_activation @require_tf class TestTFActivations(unittest.TestCase): def test_gelu_10(self): x = tf.constant([-100, -1.0, -0.1, 0, 0.1, 1.0, 100.0]) gelu = get_tf_activation("gelu") gelu10 = get_tf_activation("gelu_10") y_gelu = gelu(x) y_gelu_10 = gelu10(x) clipped_mask = tf.where(y_gelu_10 < 10.0, 1.0, 0.0) self.assertEqual(tf.math.reduce_max(y_gelu_10).numpy().item(), 10.0) self.assertTrue(np.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask)) def test_get_activation(self): get_tf_activation("gelu") get_tf_activation("gelu_10") get_tf_activation("gelu_fast") get_tf_activation("gelu_new") get_tf_activation("glu") get_tf_activation("mish") get_tf_activation("quick_gelu") get_tf_activation("relu") get_tf_activation("sigmoid") get_tf_activation("silu") get_tf_activation("swish") get_tf_activation("tanh") with self.assertRaises(KeyError): get_tf_activation("bogus") with self.assertRaises(KeyError): get_tf_activation(None)
coding: utf-8. Copyright 2023 HuggingFace Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. The spectrogram impulse test uses an impulse shifted in time; the repeated fmt: off / fmt: on markers in the source bracket hard-coded expected arrays.
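As a hedged restatement of the mel-scale conversions the assertions below rely on (the constants reproduce the asserted values, e.g. hertz_to_mel(100) ~ 150.489 and hertz_to_mel(2000, "slaney") ~ 25.0819); this sketch is illustrative, not the library implementation:

import numpy as np

def htk_mel(f):
    # HTK scale: 2595 * log10(1 + f / 700)
    return 2595.0 * np.log10(1.0 + f / 700.0)

def slaney_mel(f):
    # Slaney scale: linear (3 * f / 200) below 1 kHz, logarithmic above
    f = np.asarray(f, dtype=float)
    return np.where(f < 1000.0, 3.0 * f / 200.0, 15.0 + 27.0 * np.log(f / 1000.0) / np.log(6.4))

print(htk_mel(100.0))                       # ~150.489
print(slaney_mel([100.0, 1000.0, 2000.0]))  # ~[1.5, 15.0, 25.0819]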
import unittest import numpy as np import pytest from transformers.audio_utils import ( amplitude_to_db, hertz_to_mel, mel_filter_bank, mel_to_hertz, power_to_db, spectrogram, window_function, ) class AudioUtilsFunctionTester(unittest.TestCase): def test_hertz_to_mel(self): self.assertEqual(hertz_to_mel(0.0), 0.0) self.assertAlmostEqual(hertz_to_mel(100), 150.48910241) inputs = np.array([100, 200]) expected = np.array([150.48910241, 283.22989816]) self.assertTrue(np.allclose(hertz_to_mel(inputs), expected)) self.assertEqual(hertz_to_mel(0.0, "slaney"), 0.0) self.assertEqual(hertz_to_mel(100, "slaney"), 1.5) inputs = np.array([60, 100, 200, 1000, 1001, 2000]) expected = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016]) self.assertTrue(np.allclose(hertz_to_mel(inputs, "slaney"), expected)) inputs = np.array([60, 100, 200, 1000, 1001, 2000]) expected = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674]) self.assertTrue(np.allclose(hertz_to_mel(inputs, "kaldi"), expected)) with pytest.raises(ValueError): hertz_to_mel(100, mel_scale=None) def test_mel_to_hertz(self): self.assertEqual(mel_to_hertz(0.0), 0.0) self.assertAlmostEqual(mel_to_hertz(150.48910241), 100) inputs = np.array([150.48910241, 283.22989816]) expected = np.array([100, 200]) self.assertTrue(np.allclose(mel_to_hertz(inputs), expected)) self.assertEqual(mel_to_hertz(0.0, "slaney"), 0.0) self.assertEqual(mel_to_hertz(1.5, "slaney"), 100) inputs = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016]) expected = np.array([60, 100, 200, 1000, 1001, 2000]) self.assertTrue(np.allclose(mel_to_hertz(inputs, "slaney"), expected)) inputs = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674]) expected = np.array([60, 100, 200, 1000, 1001, 2000]) self.assertTrue(np.allclose(mel_to_hertz(inputs, "kaldi"), expected)) with pytest.raises(ValueError): mel_to_hertz(100, mel_scale=None) def test_mel_filter_bank_shape(self): mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm=None, mel_scale="htk", ) self.assertEqual(mel_filters.shape, (513, 13)) mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm="slaney", mel_scale="slaney", ) self.assertEqual(mel_filters.shape, (513, 13)) mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm="slaney", mel_scale="slaney", triangularize_in_mel_space=True, ) self.assertEqual(mel_filters.shape, (513, 13)) def test_mel_filter_bank_htk(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="htk", ) expected = np.array([ [0.0 , 0.0 , 0.0 , 0.0 ], [0.61454786, 0.0 , 0.0 , 0.0 ], [0.82511046, 0.17488954, 0.0 , 0.0 ], [0.35597035, 0.64402965, 0.0 , 0.0 ], [0.0 , 0.91360726, 0.08639274, 0.0 ], [0.0 , 0.55547007, 0.44452993, 0.0 ], [0.0 , 0.19733289, 0.80266711, 0.0 ], [0.0 , 0.0 , 0.87724349, 0.12275651], [0.0 , 0.0 , 0.6038449 , 0.3961551 ], [0.0 , 0.0 , 0.33044631, 0.66955369], [0.0 , 0.0 , 0.05704771, 0.94295229], [0.0 , 0.0 , 0.0 , 0.83483975], [0.0 , 0.0 , 0.0 , 0.62612982], [0.0 , 0.0 , 0.0 , 0.41741988], [0.0 , 0.0 , 0.0 , 0.20870994], [0.0 , 0.0 , 0.0 , 0.0 ] ]) self.assertTrue(np.allclose(mel_filters, expected)) def test_mel_filter_bank_slaney(self): mel_filters = mel_filter_bank( num_frequency_bins=16, 
num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="slaney", ) expected = np.array([ [0.0 , 0.0 , 0.0 , 0.0 ], [0.39869419, 0.0 , 0.0 , 0.0 ], [0.79738839, 0.0 , 0.0 , 0.0 ], [0.80391742, 0.19608258, 0.0 , 0.0 ], [0.40522322, 0.59477678, 0.0 , 0.0 ], [0.00652903, 0.99347097, 0.0 , 0.0 ], [0.0 , 0.60796161, 0.39203839, 0.0 ], [0.0 , 0.20939631, 0.79060369, 0.0 ], [0.0 , 0.0 , 0.84685344, 0.15314656], [0.0 , 0.0 , 0.52418477, 0.47581523], [0.0 , 0.0 , 0.2015161 , 0.7984839 ], [0.0 , 0.0 , 0.0 , 0.9141874 ], [0.0 , 0.0 , 0.0 , 0.68564055], [0.0 , 0.0 , 0.0 , 0.4570937 ], [0.0 , 0.0 , 0.0 , 0.22854685], [0.0 , 0.0 , 0.0 , 0.0 ] ]) self.assertTrue(np.allclose(mel_filters, expected)) def test_mel_filter_bank_kaldi(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) expected = np.array( [[0.0000, 0.0000, 0.0000, 0.0000], [0.6086, 0.0000, 0.0000, 0.0000], [0.8689, 0.1311, 0.0000, 0.0000], [0.4110, 0.5890, 0.0000, 0.0000], [0.0036, 0.9964, 0.0000, 0.0000], [0.0000, 0.6366, 0.3634, 0.0000], [0.0000, 0.3027, 0.6973, 0.0000], [0.0000, 0.0000, 0.9964, 0.0036], [0.0000, 0.0000, 0.7135, 0.2865], [0.0000, 0.0000, 0.4507, 0.5493], [0.0000, 0.0000, 0.2053, 0.7947], [0.0000, 0.0000, 0.0000, 0.9752], [0.0000, 0.0000, 0.0000, 0.7585], [0.0000, 0.0000, 0.0000, 0.5539], [0.0000, 0.0000, 0.0000, 0.3599], [0.0000, 0.0000, 0.0000, 0.1756]] ) self.assertTrue(np.allclose(mel_filters, expected, atol=5e-5)) def test_mel_filter_bank_slaney_norm(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm="slaney", mel_scale="slaney", ) expected = np.array([ [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [1.19217795e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [2.38435591e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [2.40387905e-03, 5.86232616e-04, 0.00000000e+00, 0.00000000e+00], [1.21170110e-03, 1.77821783e-03, 0.00000000e+00, 0.00000000e+00], [1.95231437e-05, 2.97020305e-03, 0.00000000e+00, 0.00000000e+00], [0.00000000e+00, 1.81763684e-03, 1.04857612e-03, 0.00000000e+00], [0.00000000e+00, 6.26036972e-04, 2.11460963e-03, 0.00000000e+00], [0.00000000e+00, 0.00000000e+00, 2.26505954e-03, 3.07332945e-04], [0.00000000e+00, 0.00000000e+00, 1.40202503e-03, 9.54861093e-04], [0.00000000e+00, 0.00000000e+00, 5.38990521e-04, 1.60238924e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.83458185e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37593638e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 9.17290923e-04], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.58645462e-04], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] ]) self.assertTrue(np.allclose(mel_filters, expected)) def test_window_function(self): window = window_function(16, "hann") self.assertEqual(len(window), 16) expected = np.array([ 0.0, 0.03806023, 0.14644661, 0.30865828, 0.5, 0.69134172, 0.85355339, 0.96193977, 1.0, 0.96193977, 0.85355339, 0.69134172, 0.5, 0.30865828, 0.14644661, 0.03806023, ]) self.assertTrue(np.allclose(window, expected)) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x 
in speech_samples] def test_spectrogram_impulse(self): waveform = np.zeros(40) waveform[9] = 1.0 spec = spectrogram( waveform, window_function(12, "hann", frame_length=16), frame_length=16, hop_length=4, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (9, 11)) expected = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) self.assertTrue(np.allclose(spec, expected)) def test_spectrogram_integration_test(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) expected = np.array([ 0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 , 0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357, 0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792, 0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558, 0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598, 0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042, 0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293, 0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 , 0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666, 0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069, 0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424, 0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637, 0.0293578 , 0.03452379, 0.02194803, 0.01676056, ]) self.assertTrue(np.allclose(spec[:64, 400], expected)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, fft_length=512, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) self.assertTrue(np.allclose(spec[:64, 400], expected)) mel_filters = mel_filter_bank( num_frequency_bins=256, num_mel_filters=400, min_frequency=20, max_frequency=8000, sampling_rate=16000, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) spec = spectrogram( waveform, window_function(400, "povey", periodic=False), frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, pad_mode="reflect", onesided=True, preemphasis=0.97, mel_filters=mel_filters, log_mel="log", mel_floor=1.1920928955078125e-07, remove_dc_offset=True, ) self.assertEqual(spec.shape, (400, 584)) expected = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -6.52463769, -7.73677889, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.18650018, -3.37195286, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.70190154, -2.4217066 , -15.94238515, -15.94238515, -15.94238515, -15.94238515, -5.62755239, -3.53385194, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -9.43303023, -8.77480925, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.2951092 , -5.51585994, -15.94238515, -15.94238515, -15.94238515, -4.40151721, -3.95228878, -15.94238515, -15.94238515, -15.94238515, -6.10365415, -4.59494697, -15.94238515, -15.94238515, -15.94238515, -8.10727767, -6.2585298 , -15.94238515, -15.94238515, -15.94238515, -5.60161702, -4.47217004, -15.94238515, -15.94238515, -15.94238515, -5.91641988] ) self.assertTrue(np.allclose(spec[:64, 400], expected, atol=1e-5)) def test_spectrogram_center_padding(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(512, "hann"), 
frame_length=512, hop_length=128, center=True, pad_mode="reflect", ) self.assertEqual(spec.shape, (257, 732)) expected = np.array([ 0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202, 0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668, 0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998, 0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725, 0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529, 0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339, 0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734, 0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963, 0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949, 0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404, 0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862, 0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 , 0.00217659, 0.00276204, 0.00260835, 0.00299299, ]) self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="constant", ) self.assertEqual(spec.shape, (257, 732)) expected = np.array([ 0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115, 0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055, 0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367, 0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621, 0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947, 0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912, 0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984, 0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813, 0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781, 0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 , 0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322, 0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599, 0.00788239, 0.00664407, 0.00824227, 0.00628301, ]) self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=False, ) self.assertEqual(spec.shape, (257, 728)) expected = np.array([ 0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727, 0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 , 0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623, 0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692, 0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 , 0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883, 0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801, 0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778, 0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908, 0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476, 0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512, 0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876, 0.00811857, 0.00538216, 0.00685749, 0.00535275, ]) self.assertTrue(np.allclose(spec[:64, 0], expected)) def test_spectrogram_shapes(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (201, 732)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, power=1.0, center=False, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (201, 729)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, fft_length=512, power=1.0, 
center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=64, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 1464)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=64, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 1464)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=512, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 183)) def test_mel_spectrogram(self): waveform = self._load_datasamples(1)[0] mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm=None, mel_scale="htk", ) self.assertEqual(mel_filters.shape, (513, 13)) spec = spectrogram( waveform, window_function(800, "hann", frame_length=1024), frame_length=1024, hop_length=128, power=2.0, ) self.assertEqual(spec.shape, (513, 732)) spec = spectrogram( waveform, window_function(800, "hann", frame_length=1024), frame_length=1024, hop_length=128, power=2.0, mel_filters=mel_filters, ) self.assertEqual(spec.shape, (13, 732)) expected = np.array([ 1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01, 8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03, 7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03, 9.44153646e-04 ]) self.assertTrue(np.allclose(spec[:, 300], expected)) def test_spectrogram_power(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=None, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.complex64) expected = np.array([ 0.01452305+0.01820039j, -0.01737362-0.01641946j, 0.0121028 +0.01565081j, -0.02794554-0.03021514j, 0.04719803+0.04086519j, -0.04391563-0.02779365j, 0.05682834+0.01571325j, -0.08604821-0.02023657j, 0.07497991+0.0186641j , -0.06366091-0.00922475j, 0.11003416+0.0114788j , -0.13677941-0.01523552j, 0.10934535-0.00117226j, -0.11635598+0.02551187j, 0.14708674-0.03469823j, -0.1328196 +0.06034218j, 0.12667368-0.13973421j, -0.14764774+0.18912019j, 0.10235471-0.12181523j, -0.00773012+0.04730498j, -0.01487191-0.07312611j, -0.02739162+0.09619419j, 0.02895459-0.05398273j, 0.01198589+0.05276592j, -0.02117299-0.10123465j, 0.00666388+0.09526499j, -0.01672773-0.05649684j, 0.02723125+0.05939891j, -0.01879361-0.062954j , 0.03686557+0.04568823j, -0.07394181-0.07949649j, 0.06238583+0.13905765j, ]) self.assertTrue(np.allclose(spec[64:96, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) expected = np.array([ 0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 , 0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579, 0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405, 0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241, 0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509, 0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678, 0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171, 0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861, 0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 , 
0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615, 0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328, 0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876, 0.12322842, 0.1621659 , 0.12334293, 0.06033659, ]) self.assertTrue(np.allclose(spec[64:128, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=2.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) expected = np.array([ 5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03, 3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03, 5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02, 1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02, 3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03, 5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 2.92790355e-03, 1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03, 4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02, 1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02, 1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03, 6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05, 1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04, 2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04, 2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02, 5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02, 1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03, ]) self.assertTrue(np.allclose(spec[64:128, 321], expected)) def test_power_to_db(self): spectrogram = np.zeros((2, 3)) spectrogram[0, 0] = 2.0 spectrogram[0, 1] = 0.5 spectrogram[0, 2] = 0.707 spectrogram[1, 1] = 1.0 output = power_to_db(spectrogram, reference=1.0) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-100.0, 0.0, -100.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0) expected = np.array([[0.0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, min_value=1e-6) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-60.0, 0.0, -60.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, db_range=80) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0.0, -76.98970004]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], [-80.0, -3.01029996, -80.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, min_value=1e-6, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]]) self.assertTrue(np.allclose(output, expected)) with pytest.raises(ValueError): power_to_db(spectrogram, reference=0.0) with pytest.raises(ValueError): power_to_db(spectrogram, min_value=0.0) with pytest.raises(ValueError): power_to_db(spectrogram, db_range=-80) def test_amplitude_to_db(self): spectrogram = np.zeros((2, 3)) spectrogram[0, 0] = 2.0 spectrogram[0, 1] = 0.5 spectrogram[0, 2] = 0.707 spectrogram[1, 1] = 1.0 output = amplitude_to_db(spectrogram, reference=1.0) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-100.0, 0.0, -100.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0) expected = 
np.array([[0.0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, min_value=1e-3) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-60.0, 0.0, -60.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, db_range=80) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0.0, -73.97940009]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0, db_range=80) expected = np.array([[0.0, -12.04119983, -9.03221164], [-80.0, -6.02059991, -80.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0, min_value=1e-3, db_range=80) expected = np.array([[0.0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]]) self.assertTrue(np.allclose(output, expected)) with pytest.raises(ValueError): amplitude_to_db(spectrogram, reference=0.0) with pytest.raises(ValueError): amplitude_to_db(spectrogram, min_value=0.0) with pytest.raises(ValueError): amplitude_to_db(spectrogram, db_range=-80)
Copyright 2023 The HuggingFace team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Tests for the backbone utilities: out_features defaults to the last layer if both out_features and out_indices are None; out_indices are set to match out_features and out_features are set to match out_indices; out_features can be selected from negative indices; stage_names must be set; out_features must be a list and a subset of stage_names; out_indices must be a list or tuple and a subset of stage_names; out_features and out_indices must have the same length, should match each other, and should be in order; the check passes with valid inputs; and the BackboneMixin sets and updates output features and indices correctly.
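A hedged illustration of the alignment helper exercised by the tests below (the printed outputs are taken from the assertions themselves):

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["a", "b", "c"]
# both None -> defaults to the last stage
print(get_aligned_output_features_output_indices(None, None, stage_names))      # (["c"], [2])
# only indices given -> features are filled in to match
print(get_aligned_output_features_output_indices(None, [0, 2], stage_names))    # (["a", "c"], [0, 2])
# negative indices are kept as-is while the matching features are resolved
print(get_aligned_output_features_output_indices(None, [-3, -1], stage_names))  # (["a", "c"], [-3, -1])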
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class BackboneUtilsTester(unittest.TestCase): def test_get_aligned_output_features_output_indices(self): stage_names = ["a", "b", "c"] out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names) self.assertEqual(out_features, ["c"]) self.assertEqual(out_indices, [2]) out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [0, 2]) out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [0, 2]) out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names) self.assertEqual(out_features, ["a", "c"]) self.assertEqual(out_indices, [-3, -1]) def test_verify_out_features_out_indices(self): with self.assertRaises(ValueError): verify_out_features_out_indices(["a", "b"], (0, 1), None) with self.assertRaises(ValueError): verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"]) with self.assertRaises(ValueError): verify_out_features_out_indices(["a", "b"], (0, 1), ["a"]) with self.assertRaises(ValueError): verify_out_features_out_indices(None, 0, ["a", "b"]) with self.assertRaises(ValueError): verify_out_features_out_indices(None, (0, 1), ["a"]) with self.assertRaises(ValueError): verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"]) with self.assertRaises(ValueError): verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"]) with self.assertRaises(ValueError): verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"]) verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"]) def test_backbone_mixin(self): backbone = BackboneMixin() backbone.stage_names = ["a", "b", "c"] backbone._out_features = ["a", "c"] backbone._out_indices = [0, 2] self.assertEqual(backbone.out_features, ["a", "c"]) self.assertEqual(backbone.out_indices, [0, 2]) backbone.out_features = ["a", "b"] self.assertEqual(backbone.out_features, ["a", "b"]) self.assertEqual(backbone.out_indices, [0, 1]) backbone.out_indices = [-3, -1] self.assertEqual(backbone.out_features, ["a", "c"]) self.assertEqual(backbone.out_indices, [-3, -1])
coding: utf-8. Copyright 2019-present The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Tests for the transformers CLI: test transformers-cli env; for the conversion and download commands, clean potential past runs by removing any previously downloaded model, run the command, and check the model files are downloaded correctly under /tmp/hf-internal-testing/tiny-random-gptj, /tmp/models--hf-internal-testing--tiny-random-gptj and /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer.
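A hedged sketch of how the tests below drive the CLI in-process by patching sys.argv before calling the entry point (the argument values mirror the env test; running it prints environment information):

from unittest.mock import patch

import transformers.commands.transformers_cli

with patch("sys.argv", ["transformers-cli", "env"]):
    transformers.commands.transformers_cli.main()  # prints Python version, platform, etc.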
import os import shutil import unittest from unittest.mock import patch from transformers.testing_utils import CaptureStd, is_pt_tf_cross_test, require_torch class CLITest(unittest.TestCase): @patch("sys.argv", ["fakeprogrampath", "env"]) def test_cli_env(self): import transformers.commands.transformers_cli with CaptureStd() as cs: transformers.commands.transformers_cli.main() self.assertIn("Python version", cs.out) self.assertIn("Platform", cs.out) self.assertIn("Using distributed or parallel set-up in script?", cs.out) @is_pt_tf_cross_test @patch( "sys.argv", ["fakeprogrampath", "pt-to-tf", "--model-name", "hf-internal-testing/tiny-random-gptj", "--no-pr"] ) def test_cli_pt_to_tf(self): import transformers.commands.transformers_cli shutil.rmtree("/tmp/hf-internal-testing/tiny-random-gptj", ignore_errors=True) transformers.commands.transformers_cli.main() self.assertTrue(os.path.exists("/tmp/hf-internal-testing/tiny-random-gptj/tf_model.h5")) @require_torch @patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"]) def test_cli_download(self): import transformers.commands.transformers_cli shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True) transformers.commands.transformers_cli.main() self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots")) @require_torch @patch( "sys.argv", [ "fakeprogrampath", "download", "hf-internal-testing/test_dynamic_model_with_tokenizer", "--trust-remote-code", "--cache-dir", "/tmp", ], ) def test_cli_download_trust_remote(self): import transformers.commands.transformers_cli shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True) transformers.commands.transformers_cli.main() self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs")) self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs")) self.assertTrue( os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots") )
coding: utf-8. Copyright 2019-present The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Runs through a specific directory looking for the files identified with identifier and executes the doctests in those files. Args: directory (Path): directory containing the files; identifier (str): will parse files containing this; ignore_files (List[str]): list of files to skip; n_identifier (str or List[str]): will not parse files containing this/these identifiers; only_modules (bool): whether to only analyze modules. Opens all files.
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow logger = logging.getLogger() @unittest.skip("Temporarily disable the doc tests.") @require_torch @require_tf @slow class TestCodeExamples(unittest.TestCase): def analyze_directory( self, directory: Path, identifier: Union[str, None] = None, ignore_files: Union[List[str], None] = None, n_identifier: Union[str, List[str], None] = None, only_modules: bool = True, ): files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))] if identifier is not None: files = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(n_identifier, List): for n_ in n_identifier: files = [file for file in files if n_ not in file] else: files = [file for file in files if n_identifier not in file] ignore_files = ignore_files or [] ignore_files.append("__init__.py") files = [file for file in files if file not in ignore_files] for file in files: print("Testing", file) if only_modules: module_identifier = file.split(".")[0] try: module_identifier = getattr(transformers, module_identifier) suite = doctest.DocTestSuite(module_identifier) result = unittest.TextTestRunner().run(suite) self.assertIs(len(result.failures), 0) except AttributeError: logger.info(f"{module_identifier} is not a module.") else: result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS) self.assertIs(result.failed, 0) def test_modeling_examples(self): transformers_directory = Path("src/transformers") files = "modeling" ignore_files = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files) def test_tokenization_examples(self): transformers_directory = Path("src/transformers") files = "tokenization" self.analyze_directory(transformers_directory, identifier=files) def test_configuration_examples(self): transformers_directory = Path("src/transformers") files = "configuration" self.analyze_directory(transformers_directory, identifier=files) def test_remaining_examples(self): transformers_directory = Path("src/transformers") n_identifiers = ["configuration", "modeling", "tokenization"] self.analyze_directory(transformers_directory, n_identifier=n_identifiers) def test_doc_sources(self): doc_source_directory = Path("docs/source") ignore_files = ["favicon.ico"] self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
Copyright 2020, the HuggingFace team. Licensed under the Apache License, Version 2.0; the full notice (http://www.apache.org/licenses/LICENSE-2.0, distributed on an "AS IS" basis, without warranties or conditions of any kind) applies. Comments of the module below: try to import everything from transformers to ensure every object can be loaded (noqa: F406). An actual model hosted on huggingface.co; the default branch name; one particular commit (not the top of `main`); a commit that does not exist, so we should 404; the SHA-1 of config.json at the top of `main`, for checking purposes; the SHA-256 of pytorch_model.bin at the top of `main`, for checking purposes. Dummy contexts to test ContextManagers. If the spec is missing, importlib would not be able to import the module dynamically. The print statement adds a new line at the end of the output; the output should be wrapped with an English welcome and goodbye; the output should be wrapped with an English and French welcome and goodbye. find_labels works regardless of the class name (it detects the framework through inheritance); Flax models don't have labels.
import contextlib import importlib import io import unittest import transformers from transformers import * from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER REVISION_ID_DEFAULT = "main" REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" REVISION_ID_INVALID = "aaaaaaa" PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" @contextlib.contextmanager def context_en(): print("Welcome!") yield print("Bye!") @contextlib.contextmanager def context_fr(): print("Bonjour!") yield print("Au revoir!") class TestImportMechanisms(unittest.TestCase): def test_module_spec_available(self): assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers") is not None class GenericUtilTests(unittest.TestCase): @unittest.mock.patch("sys.stdout", new_callable=io.StringIO) def test_context_managers_no_context(self, mock_stdout): with ContextManagers([]): print("Transformers are awesome!") self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n") @unittest.mock.patch("sys.stdout", new_callable=io.StringIO) def test_context_managers_one_context(self, mock_stdout): with ContextManagers([context_en()]): print("Transformers are awesome!") self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n") @unittest.mock.patch("sys.stdout", new_callable=io.StringIO) def test_context_managers_two_context(self, mock_stdout): with ContextManagers([context_fr(), context_en()]): print("Transformers are awesome!") self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n") @require_torch def test_find_labels_pt(self): self.assertEqual(find_labels(BertForSequenceClassification), ["labels"]) self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"]) self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"]) class DummyModel(BertForSequenceClassification): pass self.assertEqual(find_labels(DummyModel), ["labels"]) @require_tf def test_find_labels_tf(self): self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"]) self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"]) self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"]) class DummyModel(TFBertForSequenceClassification): pass self.assertEqual(find_labels(DummyModel), ["labels"]) @require_flax def test_find_labels_flax(self): self.assertEqual(find_labels(FlaxBertForSequenceClassification), []) self.assertEqual(find_labels(FlaxBertForPreTraining), []) self.assertEqual(find_labels(FlaxBertForQuestionAnswering), []) class DummyModel(FlaxBertForSequenceClassification): pass self.assertEqual(find_labels(DummyModel), [])
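The ContextManagers helper checked above wraps a list of context managers so they are entered in order and exited in reverse; a stdlib-only sketch of the same behaviour, built on contextlib.ExitStack and asserted against the exact output expected by test_context_managers_two_context, looks like this (greet is a hypothetical helper, not part of transformers):

import contextlib
import io
from contextlib import redirect_stdout

@contextlib.contextmanager
def greet(hello, bye):
    # Print a greeting on entry and a farewell on exit.
    print(hello)
    yield
    print(bye)

buffer = io.StringIO()
with redirect_stdout(buffer):
    # ExitStack enters the managers in order and unwinds them in reverse,
    # which is what ContextManagers([context_fr(), context_en()]) relies on.
    with contextlib.ExitStack() as stack:
        for cm in (greet("Bonjour!", "Au revoir!"), greet("Welcome!", "Bye!")):
            stack.enter_context(cm)
        print("Transformers are awesome!")

assert buffer.getvalue() == "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n"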
Copyright 2019-present, the HuggingFace Inc. team. Licensed under the Apache License, Version 2.0; the full notice (http://www.apache.org/licenses/LICENSE-2.0, distributed on an "AS IS" basis, without warranties or conditions of any kind) applies.
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class GenericTester(unittest.TestCase): def test_flatten_dict(self): input_dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } expected_dict = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(input_dict), expected_dict) def test_transpose_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(transpose(x), x.transpose())) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))) @require_torch def test_transpose_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_tf def test_transpose_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_flax def test_transpose_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t)))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0))))) def test_reshape_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3)))) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5)))) @require_torch def test_reshape_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_tf def test_reshape_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_flax def test_reshape_flax(self): x = 
np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3))))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5))))) def test_squeeze_numpy(self): x = np.random.randn(1, 3, 4) self.assertTrue(np.allclose(squeeze(x), np.squeeze(x))) x = np.random.randn(1, 4, 1, 5) self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2))) @require_torch def test_squeeze_torch(self): x = np.random.randn(1, 3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_tf def test_squeeze_tf(self): x = np.random.randn(1, 3, 4) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_flax def test_squeeze_flax(self): x = np.random.randn(1, 3, 4) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t)))) x = np.random.randn(1, 4, 1, 5) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2)))) def test_expand_dims_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1))) @require_torch def test_expand_dims_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_tf def test_expand_dims_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_flax def test_expand_dims_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
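Since transpose, reshape, squeeze and expand_dims are exercised above for every backend, a short numpy-only usage sketch may help; it assumes transformers is installed and relies on the fact that each helper dispatches on the input type, so the identical calls work for torch, TensorFlow and JAX tensors as well.

import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)

# The helpers mirror the numpy semantics checked in GenericTester above.
assert transpose(x, axes=(2, 1, 0)).shape == (4, 3, 1)
assert reshape(x, (3, 4)).shape == (3, 4)
assert squeeze(x).shape == (3, 4)               # the size-1 axis is dropped
assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)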
Copyright 2020, the HuggingFace team. Licensed under the Apache License, Version 2.0; the full notice (http://www.apache.org/licenses/LICENSE-2.0, distributed on an "AS IS" basis, without warranties or conditions of any kind) applies. Comments of the module below: since Python 3.10 we can use the builtin `|` operator for union types (see PEP 604, https://peps.python.org/pep-0604/). A small helper to check pseudo-equality of parsed arguments on ArgumentParser instances: choices with mixed types have a custom function as `type`, so we need to compare the results directly for equality. A boolean `no_*` argument always has to come after its default-True regular counterpart, and its default must be set to False.
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool is_python_no_less_than_3_10 = sys.version_info >= (3, 10) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) @dataclass class BasicExample: foo: int bar: float baz: str flag: bool @dataclass class WithDefaultExample: foo: int = 42 baz: str = field(default="toto", metadata={"help": "help message"}) @dataclass class WithDefaultBoolExample: foo: bool = False baz: bool = True opt: Optional[bool] = None class BasicEnum(Enum): titi = "titi" toto = "toto" class MixedTypeEnum(Enum): titi = "titi" toto = "toto" fourtytwo = 42 @dataclass class EnumExample: foo: BasicEnum = "toto" def __post_init__(self): self.foo = BasicEnum(self.foo) @dataclass class MixedTypeEnumExample: foo: MixedTypeEnum = "toto" def __post_init__(self): self.foo = MixedTypeEnum(self.foo) @dataclass class OptionalExample: foo: Optional[int] = None bar: Optional[float] = field(default=None, metadata={"help": "help message"}) baz: Optional[str] = None ces: Optional[List[str]] = list_field(default=[]) des: Optional[List[int]] = list_field(default=[]) @dataclass class ListExample: foo_int: List[int] = list_field(default=[]) bar_int: List[int] = list_field(default=[1, 2, 3]) foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"]) foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class RequiredExample: required_list: List[int] = field() required_str: str = field() required_enum: BasicEnum = field() def __post_init__(self): self.required_enum = BasicEnum(self.required_enum) @dataclass class StringLiteralAnnotationExample: foo: int required_enum: "BasicEnum" = field() opt: "Optional[bool]" = None baz: "str" = field(default="toto", metadata={"help": "help message"}) foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"]) if is_python_no_less_than_3_10: @dataclass class WithDefaultBoolExamplePep604: foo: bool = False baz: bool = True opt: bool | None = None @dataclass class OptionalExamplePep604: foo: int | None = None bar: float | None = field(default=None, metadata={"help": "help message"}) baz: str | None = None ces: list[str] | None = list_field(default=[]) des: list[int] | None = list_field(default=[]) class HfArgumentParserTest(unittest.TestCase): def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser): self.assertEqual(len(a._actions), len(b._actions)) for x, y in zip(a._actions, b._actions): xx = {k: v for k, v in vars(x).items() if k != "container"} yy = {k: v for k, v in vars(y).items() if k != "container"} if xx.get("choices", None) and yy.get("choices", None): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice)) del xx["type"], yy["type"] self.assertEqual(xx, yy) def test_basic(self): parser = HfArgumentParser(BasicExample) expected = argparse.ArgumentParser() expected.add_argument("--foo", type=int, required=True) expected.add_argument("--bar", type=float, required=True) expected.add_argument("--baz", type=str, required=True) expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?") 
self.argparsersEqual(parser, expected) args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False) self.assertFalse(example.flag) def test_with_default(self): parser = HfArgumentParser(WithDefaultExample) expected = argparse.ArgumentParser() expected.add_argument("--foo", default=42, type=int) expected.add_argument("--baz", default="toto", type=str, help="help message") self.argparsersEqual(parser, expected) def test_with_default_bool(self): expected = argparse.ArgumentParser() expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?") expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?") expected.add_argument("--no_baz", action="store_false", default=False, dest="baz") expected.add_argument("--opt", type=string_to_bool, default=None) dataclass_types = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(WithDefaultBoolExamplePep604) for dataclass_type in dataclass_types: parser = HfArgumentParser(dataclass_type) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args, Namespace(foo=False, baz=True, opt=None)) args = parser.parse_args(["--foo", "--no_baz"]) self.assertEqual(args, Namespace(foo=True, baz=False, opt=None)) args = parser.parse_args(["--foo", "--baz"]) self.assertEqual(args, Namespace(foo=True, baz=True, opt=None)) args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"]) self.assertEqual(args, Namespace(foo=True, baz=True, opt=True)) args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"]) self.assertEqual(args, Namespace(foo=False, baz=False, opt=False)) def test_with_enum(self): parser = HfArgumentParser(MixedTypeEnumExample) expected = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args.foo, "toto") enum_ex = parser.parse_args_into_dataclasses([])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.toto) args = parser.parse_args(["--foo", "titi"]) self.assertEqual(args.foo, "titi") enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.titi) args = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo) def test_with_literal(self): @dataclass class LiteralExample: foo: Literal["titi", "toto", 42] = "toto" parser = HfArgumentParser(LiteralExample) expected = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args.foo, "toto") args = parser.parse_args(["--foo", "titi"]) self.assertEqual(args.foo, "titi") args = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) def test_with_list(self): parser = HfArgumentParser(ListExample) expected = argparse.ArgumentParser() expected.add_argument("--foo_int", nargs="+", default=[], type=int) expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int) expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str) expected.add_argument("--foo_float", nargs="+", default=[0.1, 
0.2, 0.3], type=float) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual( args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]), ) args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split()) self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7])) def test_with_optional(self): expected = argparse.ArgumentParser() expected.add_argument("--foo", default=None, type=int) expected.add_argument("--bar", default=None, type=float, help="help message") expected.add_argument("--baz", default=None, type=str) expected.add_argument("--ces", nargs="+", default=[], type=str) expected.add_argument("--des", nargs="+", default=[], type=int) dataclass_types = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(OptionalExamplePep604) for dataclass_type in dataclass_types: parser = HfArgumentParser(dataclass_type) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[])) args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split()) self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3])) def test_with_required(self): parser = HfArgumentParser(RequiredExample) expected = argparse.ArgumentParser() expected.add_argument("--required_list", nargs="+", type=int, required=True) expected.add_argument("--required_str", type=str, required=True) expected.add_argument( "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, ) self.argparsersEqual(parser, expected) def test_with_string_literal_annotation(self): parser = HfArgumentParser(StringLiteralAnnotationExample) expected = argparse.ArgumentParser() expected.add_argument("--foo", type=int, required=True) expected.add_argument( "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, ) expected.add_argument("--opt", type=string_to_bool, default=None) expected.add_argument("--baz", default="toto", type=str, help="help message") expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str) self.argparsersEqual(parser, expected) def test_parse_dict(self): parser = HfArgumentParser(BasicExample) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } parsed_args = parser.parse_dict(args_dict)[0] args = BasicExample(**args_dict) self.assertEqual(parsed_args, args) def test_parse_dict_extra_key(self): parser = HfArgumentParser(BasicExample) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False) def test_parse_json(self): parser = HfArgumentParser(BasicExample) args_dict_for_json = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: temp_local_path = os.path.join(tmp_dir, "temp_json") os.mkdir(temp_local_path) with open(temp_local_path + ".json", "w+") as f: json.dump(args_dict_for_json, f) parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0] args = BasicExample(**args_dict_for_json) self.assertEqual(parsed_args, args) def test_parse_yaml(self): parser = HfArgumentParser(BasicExample) args_dict_for_yaml = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() 
as tmp_dir: temp_local_path = os.path.join(tmp_dir, "temp_yaml") os.mkdir(temp_local_path) with open(temp_local_path + ".yaml", "w+") as f: yaml.dump(args_dict_for_yaml, f) parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0] args = BasicExample(**args_dict_for_yaml) self.assertEqual(parsed_args, args) def test_integration_training_args(self): parser = HfArgumentParser(TrainingArguments) self.assertIsNotNone(parser)
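As a usage sketch of the parser exercised above, the snippet below declares a small dataclass and parses command-line style arguments with HfArgumentParser; the DemoArguments fields are hypothetical, but the generated --no_do_train switch follows the same convention as the --no_baz argument asserted in test_with_default_bool.

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    # Hypothetical fields, chosen only for illustration.
    learning_rate: float = field(default=3e-5, metadata={"help": "Peak learning rate."})
    do_train: bool = field(default=True, metadata={"help": "Whether to run training."})
    run_name: str = field(default="demo", metadata={"help": "Name of the run."})

parser = HfArgumentParser(DemoArguments)

# A bool field defaulting to True also gets a --no_<name> store_false switch,
# which is how test_with_default_bool expects --no_baz to behave.
(args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--no_do_train"])
assert args.learning_rate == 1e-4
assert args.do_train is False
assert args.run_name == "demo"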
Copyright 2020, the HuggingFace team. Licensed under the Apache License, Version 2.0; the full notice (http://www.apache.org/licenses/LICENSE-2.0, distributed on an "AS IS" basis, without warranties or conditions of any kind) applies. Comments of the module below: should have downloaded the file in here; the cache should contain at least those three subfolders; the file is cached at the same place the second time; using a specific revision to test the full commit hash. Under the mock environment we get a 500 error when trying to reach the tokenizer; with this check we verify that we did call the fake HEAD request. get_file_from_repo returns None if the file does not exist; the function raises if the repository does not exist; the function raises if the revision does not exist. The name is the cached name, which is not very easy to test, so instead we load the content. Test that downloading a file from a gated repo fails with the correct message when not authenticated (all files except README.md are protected on a gated repo); test that checking file existence on a gated repo fails with the correct message when not authenticated (all files except README.md are protected on a gated repo).
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) RANDOM_BERT = "hf-internal-testing/tiny-random-bert" CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" GATED_REPO = "hf-internal-testing/dummy-gated-model" README_FILE = "README.md" class GetFromCacheTests(unittest.TestCase): def test_cached_file(self): archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) self.assertTrue(os.path.isdir(CACHE_DIR)) for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder))) with open(os.path.join(CACHE_DIR, "refs", "main")) as f: main_commit = f.read() self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME)) self.assertTrue(os.path.isfile(archive_file)) new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) self.assertEqual(archive_file, new_archive_file) archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223") self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME)) def test_cached_file_errors(self): with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): _ = cached_file("tiny-random-bert", CONFIG_NAME) with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa") with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): _ = cached_file(RANDOM_BERT, "conf") def test_non_existence_is_cached(self): with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): _ = cached_file(RANDOM_BERT, "conf") with open(os.path.join(CACHE_DIR, "refs", "main")) as f: main_commit = f.read() self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf"))) path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False) self.assertIsNone(path) path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False) self.assertIsNone(path) response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False) self.assertIsNone(path) mock_head.assert_called() def test_has_file(self): self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME)) def test_get_file_from_repo_distant(self): self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt")) with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): get_file_from_repo("bert-base-case", CONFIG_NAME) with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha") resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME) config = 
json.loads(open(resolved_file, "r").read()) self.assertEqual(config["hidden_size"], 768) def test_get_file_from_repo_local(self): with tempfile.TemporaryDirectory() as tmp_dir: filename = Path(tmp_dir) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename)) self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) def test_get_file_gated_repo(self): with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."): cached_file(GATED_REPO, "gated_file.txt", token=False) def test_has_file_gated_repo(self): with self.assertRaisesRegex(EnvironmentError, "is a gated repository"): has_file(GATED_REPO, "gated_file.txt", token=False)
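For context, here is how the cache helpers covered above are typically called; this is only a sketch, it needs network access to huggingface.co and an installed transformers, and it reuses the public repositories already referenced in the tests rather than introducing new ones.

from transformers.utils import cached_file, get_file_from_repo, has_file

# cached_file resolves the file into the local cache and returns the
# snapshot path, raising EnvironmentError for missing files or repos.
resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)

# get_file_from_repo returns None for a file that does not exist instead
# of raising, which is the behaviour test_get_file_from_repo_distant checks.
assert get_file_from_repo("bert-base-cased", "ahah.txt") is None

# has_file only checks for existence on the Hub without downloading.
assert has_file("hf-internal-testing/tiny-bert-pt-only", "pytorch_model.bin")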
Copyright 2022, HuggingFace Inc. Licensed under the Apache License, Version 2.0; the full notice (http://www.apache.org/licenses/LICENSE-2.0, distributed on an "AS IS" basis, without warranties or conditions of any kind) applies. Comments of the module below: test that a dict with the wrong keys raises an error; test that a dict with the correct keys is returned as is; test a single int value which represents (size, size); test a single int value which represents the shortest edge; test a tuple of ints which represents (height, width); test a tuple of ints which represents (width, height); test an int representing the shortest edge together with max_size, which represents the longest edge; test that an int with default_to_square=True and max_size fails.
import unittest from transformers.image_processing_utils import get_size_dict class ImageProcessingUtilsTester(unittest.TestCase): def test_get_size_dict(self): inputs = {"wrong_key": 224} with self.assertRaises(ValueError): get_size_dict(inputs) inputs = {"height": 224} with self.assertRaises(ValueError): get_size_dict(inputs) inputs = {"width": 224, "shortest_edge": 224} with self.assertRaises(ValueError): get_size_dict(inputs) inputs = {"height": 224, "width": 224} outputs = get_size_dict(inputs) self.assertEqual(outputs, inputs) inputs = {"shortest_edge": 224} outputs = get_size_dict(inputs) self.assertEqual(outputs, {"shortest_edge": 224}) inputs = {"longest_edge": 224, "shortest_edge": 224} outputs = get_size_dict(inputs) self.assertEqual(outputs, {"longest_edge": 224, "shortest_edge": 224}) outputs = get_size_dict(224) self.assertEqual(outputs, {"height": 224, "width": 224}) outputs = get_size_dict(224, default_to_square=False) self.assertEqual(outputs, {"shortest_edge": 224}) outputs = get_size_dict((150, 200)) self.assertEqual(outputs, {"height": 150, "width": 200}) outputs = get_size_dict((150, 200), height_width_order=False) self.assertEqual(outputs, {"height": 200, "width": 150}) outputs = get_size_dict(224, max_size=256, default_to_square=False) self.assertEqual(outputs, {"shortest_edge": 224, "longest_edge": 256}) with self.assertRaises(ValueError): get_size_dict(224, max_size=256, default_to_square=True)
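A quick usage sketch of get_size_dict, matching the conversions asserted above (assuming transformers is installed):

from transformers.image_processing_utils import get_size_dict

# A bare int becomes a square size dict by default...
assert get_size_dict(224) == {"height": 224, "width": 224}

# ...or the shortest edge when default_to_square=False.
assert get_size_dict(224, default_to_square=False) == {"shortest_edge": 224}

# A tuple is read as (height, width) unless height_width_order=False.
assert get_size_dict((150, 200)) == {"height": 150, "width": 200}
assert get_size_dict((150, 200), height_width_order=False) == {"height": 200, "width": 150}

# Pairing an int shortest edge with max_size yields both edges.
assert get_size_dict(224, max_size=256, default_to_square=False) == {"shortest_edge": 224, "longest_edge": 256}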