Columns: Description (string, lengths 18 to 161k) and Code (string, lengths 15 to 300k).
Coding: utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
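For orientation, here is a minimal sketch of the tiny-config pattern the tester above relies on, runnable outside the test harness. It assumes tensorflow and transformers are installed; the sizes mirror TFDistilBertModelTester's defaults, and the shape printed matches the assert in create_and_check_distilbert_model.

import tensorflow as tf
from transformers import DistilBertConfig, TFDistilBertModel

# tiny, randomly initialized config: fast to build, same shapes as the tester's defaults
config = DistilBertConfig(vocab_size=99, dim=32, n_layers=2, n_heads=4, hidden_dim=37)
model = TFDistilBertModel(config)
input_ids = tf.random.uniform((13, 7), minval=0, maxval=99, dtype=tf.int32)
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (13, 7, 32), i.e. (batch_size, seq_length, hidden_size)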
Coding: utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
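As a usage note, the special-token layout asserted above is the standard BERT-style scheme for single sequences and pairs; a quick sketch (assuming network access to the distilbert-base-uncased checkpoint):

from transformers import DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
single = tokenizer("sequence builders")["input_ids"]                          # [CLS] a [SEP]
pair = tokenizer("sequence builders", "multi-sequence build")["input_ids"]    # [CLS] a [SEP] b [SEP]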
Coding: utf-8. Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Code comments: forward pass.
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
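To turn the logits above into a human-readable prediction, the usual transformers pattern is an argmax into the config's id2label map. A small sketch continuing from the test body (logits and model as defined there):

# map the highest logit to its RVL-CDIP class name via the model config
predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])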
Coding: utf-8. Copyright 2022 HuggingFace Inc. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Code comments: previous config had dimensions in (width, height) order; initialize image_processing; create random PIL images / NumPy tensors / PyTorch tensors; test not batched input; test batched.
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    @is_flaky()
    def test_call_pil(self):
        # initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
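The from_dict kwargs test above encodes DonutImageProcessor's size normalization: an int becomes a square, and a (width, height) tuple is flipped into a {"height", "width"} dict because earlier Donut configs stored dimensions in (width, height) order. A sketch of the same behavior under the assumption that constructor defaults fill any keys omitted from the base dict:

from transformers import DonutImageProcessor

base = {"do_resize": True, "size": {"height": 18, "width": 20}}
print(DonutImageProcessor.from_dict(base, size=42).size)        # {'height': 42, 'width': 42}
print(DonutImageProcessor.from_dict(base, size=(42, 84)).size)  # {'height': 84, 'width': 42}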
Coding: utf-8. Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch Donut Swin model. Code comments: DonutSwin does not use inputs_embeds; check that output_attentions also work using config; check attention is always last and order is fine; also another 1 for reshaped_hidden_states; DonutSwin has a different seq_length; check that output_hidden_states also work using config.
import collections
import unittest

from transformers import DonutSwinConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import DonutSwinModel
    from transformers.models.donut.modeling_donut_swin import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST


class DonutSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DonutSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DonutSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DonutSwinModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": DonutSwinModel} if is_torch_available() else {}
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DonutSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DonutSwinConfig, embed_dim=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_inputs_embeds(self):
        # DonutSwin does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another 1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # DonutSwin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DonutSwinModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
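The shape assertions in create_and_check_model follow from Swin's hierarchy: each of the len(depths) - 1 patch-merging stages quarters the number of tokens and doubles the channel dimension. Worked through with the tester's defaults:

image_size, patch_size = 32, 2
depths, embed_dim = [1, 2, 1], 16

num_patches = (image_size // patch_size) ** 2               # 16 * 16 = 256 tokens after patch embedding
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))      # 16 * 4 = 64
print(expected_seq_len, expected_dim)                       # 16 64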
Coding: utf-8. Copyright 2022 HuggingFace Inc. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
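token2json parses Donut's XML-like tag stream into nested JSON; repeated <s_nickname> groups separated by <sep/> become a list. For context, a hedged sketch of the surrounding usage (processor and checkpoint names as in the test; the image variable in the commented line is a hypothetical PIL image):

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
# pixel_values = processor(image, return_tensors="pt").pixel_values  # image: a PIL.Image (hypothetical)
parsed = processor.token2json("<s_name>John Doe</s_name>")
print(parsed)  # expected {'name': 'John Doe'}, by analogy with the test above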
Coding: utf-8. Copyright 2020 HuggingFace. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Code comments: "[CLS] hello, is my dog cute? [SEP]"; embedding shape = (1, 768); compare the actual values for a slice.
import tempfile
import unittest

from transformers import DPRConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DPRContextEncoder, DPRQuestionEncoder, DPRReader, DPRReaderTokenizer
    from transformers.models.dpr.modeling_dpr import (
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DPRConfig(
            projection_dim=self.projection_dim,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DPRContextEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DPRQuestionEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DPRReader(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
        )

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_torch
class DPRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DPRContextEncoder,
            DPRQuestionEncoder,
            DPRReader,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": DPRQuestionEncoder} if is_torch_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_context_encoder(*config_and_inputs)

    def test_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_question_encoder(*config_and_inputs)

    def test_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_reader(*config_and_inputs)

    def test_init_changed_config(self):
        config = self.model_tester.prepare_config_and_inputs()[0]

        model = DPRQuestionEncoder(config=config)
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)
            model = DPRQuestionEncoder.from_pretrained(tmp_dirname, projection_dim=512)

        self.assertIsNotNone(model)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class DPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=False)
        model.to(torch_device)

        input_ids = torch.tensor(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]], dtype=torch.long, device=torch_device
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [
                [
                    0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933,
                    0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117,
                ]
            ],
            dtype=torch.float,
            device=torch_device,
        )
        self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4))

    @slow
    def test_reader_inference(self):
        tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
        model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
        model.to(torch_device)

        encoded_inputs = tokenizer(
            questions="What is love ?",
            titles="Haddaway",
            texts="What Is Love is a song recorded by the artist Haddaway",
            padding=True,
            return_tensors="pt",
        )
        encoded_inputs.to(torch_device)

        outputs = model(**encoded_inputs)

        # compare the actual values for a slice
        expected_start_logits = torch.tensor(
            [[-10.3005, -10.7765, -11.4872, -11.6841, -11.9312, -10.3002, -9.8544, -11.7378, -12.0821, -10.2975]],
            dtype=torch.float,
            device=torch_device,
        )
        expected_end_logits = torch.tensor(
            [[-11.0684, -11.7041, -11.5397, -10.3465, -10.8791, -6.8443, -11.9959, -11.0364, -10.0096, -6.8405]],
            dtype=torch.float,
            device=torch_device,
        )
        self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
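The pooler_output shapes checked above are what downstream retrieval consumes: DPR scores a passage by the dot product between the question and context embeddings. A minimal sketch with stand-in tensors (real embeddings would come from the two encoders' pooler_output):

import torch

q_emb = torch.randn(1, 768)   # stand-in for DPRQuestionEncoder(...).pooler_output
p_embs = torch.randn(4, 768)  # stand-ins for DPRContextEncoder(...).pooler_output
scores = q_emb @ p_embs.T     # shape (1, 4); a higher dot product means a more relevant passage
best_passage = scores.argmax(dim=-1).item()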
Coding: utf-8. Copyright 2020 HuggingFace. Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>; distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Code comments: follow test_modeling_tf_ctrl.py; "[CLS] hello, is my dog cute? [SEP]"; embedding shape = (1, 768); compare the actual values for a slice.
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class TFDPRModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.projection_dim = projection_dim def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict()) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_dpr_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
TFDPRContextEncoder(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_dpr_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDPRQuestionEncoder(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_dpr_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDPRReader(config=config) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @require_tf class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {} test_resize_embeddings = False test_missing_keys = False test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDPRModelTester(self) self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_dpr_context_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs) def test_dpr_question_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs) def test_dpr_reader_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFDPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFDPRQuestionEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFDPRReader.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFDPRModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model =
TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base") input_ids = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) output = model(input_ids)[0] expected_slice = tf.constant( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
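For reference, the embedding check performed by TFDPRModelIntegrationTest can be reproduced as a standalone snippet. This is a minimal sketch, assuming the "facebook/dpr-question_encoder-single-nq-base" checkpoint and its tokenizer can be downloaded; the hard-coded input_ids in the test correspond to the tokenization of "[CLS] hello, is my dog cute? [SEP]".

# Minimal sketch: embed one question with the TF DPR question encoder.
from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)
model = TFDPRQuestionEncoder.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)

# Tokenize the same question the integration test feeds as raw ids.
inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = model(**inputs).pooler_output
print(embedding.shape)  # (1, 768) for the base checkpoint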
# coding=utf-8
# Copyright 2020 HuggingFace.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
from transformers import ( DPRContextEncoderTokenizer, DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast, DPRReaderOutput, DPRReaderTokenizer, DPRReaderTokenizerFast, ) from transformers.testing_utils import require_tokenizers, slow from transformers.tokenization_utils_base import BatchEncoding from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class DPRContextEncoderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRContextEncoderTokenizer rust_tokenizer_class = DPRContextEncoderTokenizerFast test_rust_tokenizer = True @require_tokenizers class DPRQuestionEncoderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRQuestionEncoderTokenizer rust_tokenizer_class = DPRQuestionEncoderTokenizerFast test_rust_tokenizer = True @require_tokenizers class DPRReaderTokenizationTest(BertTokenizationTest): tokenizer_class = DPRReaderTokenizer rust_tokenizer_class = DPRReaderTokenizerFast test_rust_tokenizer = True @slow def test_decode_best_spans(self): tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False) input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3] reader_input = BatchEncoding({"input_ids": input_ids}) start_logits = [[0] * len(input_ids[0])] end_logits = [[0] * len(input_ids[0])] relevance_logits = [0] reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits) start_index, end_index = 8, 9 start_logits[0][start_index] = 10 end_logits[0][end_index] = 10 predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output) self.assertEqual(predicted_spans[0].start_index, start_index) self.assertEqual(predicted_spans[0].end_index, end_index) self.assertEqual(predicted_spans[0].doc_id, 0) @slow def test_call(self): tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence", add_special_tokens=False) expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3 encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"]) self.assertIn("input_ids", encoded_input) self.assertIn("attention_mask", encoded_input) self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)
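As a usage illustration of the combined encoding exercised by test_call above, here is a hedged sketch. It assumes the "bert-base-uncased" vocabulary, mirroring the checkpoint the tests load, and prints the packed sequence the reader tokenizer produces.

# Sketch: DPRReaderTokenizer packs question, title and passage into one
# sequence laid out as [CLS] question [SEP] title [SEP] text.
from transformers import DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("bert-base-uncased")
encoded = tokenizer(
    questions=["question sequence"],
    titles=["title sequence"],
    texts=["text sequence"],
)
print(encoded["input_ids"][0])       # ids for [CLS] question [SEP] title [SEP] text
print(encoded["attention_mask"][0])  # 1 for every real token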
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
# The padding test below exercises both the individual pad_image method and a
# full preprocess call.
import unittest import numpy as np from transformers.file_utils import is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import DPTImageProcessor class DPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisor")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_padding(self): image_processing = self.image_processing_class(**self.image_processor_dict) image = np.random.randn(3, 249, 491) image = image_processing.pad_image(image, size_divisor=4) self.assertTrue(image.shape[1] % 4 == 0) self.assertTrue(image.shape[2] % 4 == 0) pixel_values = image_processing.preprocess( image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt" ).pixel_values self.assertTrue(pixel_values.shape[2] % 4 == 0) self.assertTrue(pixel_values.shape[3] % 4 == 0)
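The test_padding case above relies on pad_image rounding each spatial dimension up to the nearest multiple of size_divisor. A minimal sketch of that arithmetic, assuming round-up padding (which is what the divisibility assertions in the test imply), using the same 249x491 input and divisor of 4:

import math

def padded_size(dim: int, size_divisor: int) -> int:
    # Round a spatial dimension up to the nearest multiple of size_divisor.
    return math.ceil(dim / size_divisor) * size_divisor

# The 249 x 491 image from test_padding becomes 252 x 492 with size_divisor=4,
# satisfying the shape[1] % 4 == 0 and shape[2] % 4 == 0 checks.
assert padded_size(249, 4) == 252
assert padded_size(491, 4) == 492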
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Testing suite for the PyTorch DPT model. """
import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, neck_hidden_sizes=[16, 32], is_hybrid=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.backbone_out_indices = backbone_out_indices self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.is_hybrid = is_hybrid self.neck_hidden_sizes = neck_hidden_sizes num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, fusion_hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, neck_hidden_sizes=self.neck_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = DPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, 
(self.batch_size, self.image_size, self.image_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 150, 480, 480)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) def test_post_processing_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((480, 480)) self.assertEqual(segmentation[0].shape, 
expected_shape)
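The integration tests above compare raw slices of the predicted depth; in practice the relative depth map is usually resized back to the input resolution. A hedged standalone sketch, assuming the same "Intel/dpt-large" checkpoint and COCO fixture image used by the test:

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # shape (1, 384, 384)

# Upsample the relative depth map back to the original image resolution.
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=image.size[::-1],  # PIL size is (width, height)
    mode="bicubic",
    align_corners=False,
).squeeze()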
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Testing suite for the PyTorch DPT model (AutoBackbone with DINOv2). """
import unittest from transformers import Dinov2Config, DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MODEL_MAPPING, DPTForDepthEstimation from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( backbone_config=self.get_backbone_config(), neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} test_pruning = 
False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def test_save_load_fast_init_to_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPTForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image 
@require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti") model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth expected_shape = torch.Size((1, 576, 736)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[6.0433, 7.1636, 7.4268], [6.9047, 7.2471, 7.2355], [7.9261, 8.0631, 8.0244]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
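To mirror what get_config and get_backbone_config build in the tester above outside the test harness, a tiny DPT-with-DINOv2-backbone model can be instantiated from configs alone. The sizes below are the tester's illustrative values, and the resulting model is randomly initialized:

from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation

backbone_config = Dinov2Config(
    image_size=32,
    patch_size=16,
    num_channels=3,
    hidden_size=4,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=8,
    out_features=["stage1", "stage2"],
    reshape_hidden_states=False,
)
config = DPTConfig(
    backbone_config=backbone_config,
    neck_hidden_sizes=[2, 2],
    fusion_hidden_size=6,
)
model = DPTForDepthEstimation(config)  # random weights; useful for smoke tests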
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Testing suite for the PyTorch DPT (hybrid) model. """
import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 32, 24, 24], neck_hidden_sizes=[16, 16, 32, 32], is_hybrid=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.backbone_out_indices = backbone_out_indices self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.backbone_featmap_shape = backbone_featmap_shape self.scope = scope self.is_hybrid = is_hybrid self.neck_hidden_sizes = neck_hidden_sizes num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [16, 16, 32, 32], "num_groups": 2, } return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, fusion_hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, neck_hidden_sizes=self.neck_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = DPTModel(config=config) model.to(torch_device) model.eval() 
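        # inference-only forward pass; eval() disables dropout so the shape check below is deterministic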
result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue 
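            # run one training step with gradient checkpointing enabled and make sure the loss still backpropagates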
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
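# -----------------------------------------------------------------------------
# Usage sketch (not part of the test suite): the integration test above checks
# the raw `predicted_depth` tensor at its native 384x384 resolution; in practice
# the depth map is usually upsampled back to the input image size. A minimal,
# hedged example of that post-processing step, assuming a local image at the
# hypothetical path "cats.png":
#
#     import torch
#     from PIL import Image
#     from transformers import DPTForDepthEstimation, DPTImageProcessor
#
#     processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#     model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#
#     image = Image.open("cats.png")  # hypothetical input image
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         depth = model(**inputs).predicted_depth  # shape (1, 384, 384)
#     # interpolate expects a channel dimension; PIL's .size is (width, height)
#     depth = torch.nn.functional.interpolate(
#         depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
#     ).squeeze()
# -----------------------------------------------------------------------------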
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ViTImageProcessor class EfficientFormerImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class EfficientFormerImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = EfficientFormerImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_proc_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size"))
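# -----------------------------------------------------------------------------
# Usage sketch (not part of the test suite): EfficientFormer reuses
# ViTImageProcessor, so the kwargs returned by prepare_image_processor_dict()
# above map directly onto the processor constructor. A minimal, hedged example:
#
#     import numpy as np
#     from transformers import ViTImageProcessor
#
#     processor = ViTImageProcessor(
#         do_resize=True,
#         size={"height": 18, "width": 18},
#         do_normalize=True,
#         image_mean=[0.5, 0.5, 0.5],
#         image_std=[0.5, 0.5, 0.5],
#     )
#     dummy = np.zeros((30, 30, 3), dtype=np.uint8)  # any HxWxC uint8 array
#     pixel_values = processor(images=dummy, return_tensors="np").pixel_values
#     assert pixel_values.shape == (1, 3, 18, 18)
# -----------------------------------------------------------------------------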
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch EfficientFormer model. """
import unittest import warnings from typing import List from transformers import EfficientFormerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, ) from transformers.models.efficientformer.modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class EfficientFormerModelTester: def __init__( self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.encoder_stride = encoder_stride self.num_attention_outputs = num_attention_outputs self.embed_dim = embed_dim self.seq_length = embed_dim + 1 self.resolution = resolution self.depths = depths self.hidden_sizes = hidden_sizes self.dim = dim self.mlp_expansion_ratio = mlp_expansion_ratio def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return EfficientFormerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, ) 
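    # NOTE: depths=[2, 2, 2, 2] and hidden_sizes=[16, 32, 64, 128] describe the four
    # EfficientFormer stages; the small values above keep the test model tiny and fast.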
def create_and_check_model(self, config, pixel_values, labels): model = EfficientFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = EfficientFormerForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = EfficientFormerForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( EfficientFormerModel, EfficientFormerForImageClassificationWithTeacher, EfficientFormerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": EfficientFormerModel, "image-classification": ( EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, ), } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = EfficientFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings") def test_model_common_attributes(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) 
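                # the last decoder hidden state should have shape (batch_size, decoder_seq_length, hidden_size)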
self.assertListEqual( list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet") def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if ( model_class in get_values(MODEL_MAPPING) or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ] or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): for model_name in EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = EfficientFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def 
test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class EfficientFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = EfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = (1, 1000) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.0555, 0.4825, -0.0852]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0][:3], expected_slice, atol=1e-4)) @slow def test_inference_image_classification_head_with_teacher(self): model = EfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = (1, 1000) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.1312, 0.4353, -1.0499]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0][:3], expected_slice, atol=1e-4))
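# -----------------------------------------------------------------------------
# Usage sketch (not part of the test suite): the slow integration tests above
# can be reproduced with the high-level pipeline API. Hedged example, assuming
# Hub access and a local image at the hypothetical path "cats.png":
#
#     from transformers import pipeline
#
#     classifier = pipeline("image-classification", model="snap-research/efficientformer-l1-300")
#     preds = classifier("cats.png")
#     print(preds[0]["label"], preds[0]["score"])
# -----------------------------------------------------------------------------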
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow EfficientFormer model. """
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class TFEfficientFormerModelTester: def __init__( self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.encoder_stride = encoder_stride self.num_attention_outputs = num_attention_outputs self.embed_dim = embed_dim self.seq_length = embed_dim + 1 self.resolution = resolution self.depths = depths self.hidden_sizes = hidden_sizes self.dim = dim self.mlp_expansion_ratio = mlp_expansion_ratio def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return EfficientFormerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, ) def create_and_check_model(self, config, pixel_values, labels): model = 
TFEfficientFormerModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFEfficientFormerForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = TFEfficientFormerForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFEfficientFormerModel, "image-classification": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFEfficientFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list,
tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet") def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFEfficientFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_compile_tf_model(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) functional_inputs = { key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) self.assertTrue(outputs_dict is not None) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class EfficientFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs, training=False) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_classification_head_with_teacher(self): model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs, training=False) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
coding=utf-8. Copyright 2023 HuggingFace Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.

EfficientNet optionally rescales between -1 and 1 instead of the usual 0 and 1.
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import EfficientNetImageProcessor class EfficientNetImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=13, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class EfficientNetImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = EfficientNetImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = EfficientNetImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_rescale(self): image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32) image_processor = self.image_processing_class(**self.image_processor_dict) rescaled_image = image_processor.rescale(image, scale=1 / 127.5) expected_image = (image * (1 / 127.5)).astype(np.float32) - 1 self.assertTrue(np.allclose(rescaled_image, expected_image)) rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False) expected_image = (image / 255.0).astype(np.float32) self.assertTrue(np.allclose(rescaled_image, expected_image))
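A minimal worked example of the offset rescale checked in test_rescale above. This is an illustrative sketch using plain NumPy; the sample pixel values are assumptions, not taken from the test suite. With scale=1/127.5 and the offset applied, uint8 pixels in [0, 255] map to [-1, 1]; with scale=1/255 and no offset they map to the usual [0, 1].

import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float32)  # illustrative values spanning the uint8 range
with_offset = pixels * (1 / 127.5) - 1  # EfficientNet-style rescale -> [-1.0, 0.0, 1.0]
without_offset = pixels * (1 / 255)  # the usual rescale -> [0.0, 0.5, 1.0]
print(with_offset, without_offset)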
coding=utf-8. Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Testing suite for the PyTorch EfficientNet model. Here we also overwrite some of the tests of test_modeling_common.py, as EfficientNet does not use input_ids, inputs_embeds, attention_mask and seq_length. The expected last hidden states have shape (batch_size, config.hidden_dim, height // 4, width // 4), and EfficientNet's feature maps are of shape (batch_size, num_channels, height, width); the tests also check that output_hidden_states works when set via the config. The integration test runs a forward pass and verifies the logits on an image of cute cats.
import unittest from transformers import EfficientNetConfig from transformers.testing_utils import is_pipeline_test, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EfficientNetForImageClassification, EfficientNetModel from transformers.models.efficientnet.modeling_efficientnet import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class EfficientNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, kernel_sizes=[3, 3, 5], in_channels=[32, 16, 24], out_channels=[16, 24, 20], strides=[1, 1, 2], num_block_repeats=[1, 1, 2], expand_ratios=[1, 6, 6], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.is_training = is_training self.hidden_act = hidden_act self.num_labels = num_labels self.use_labels = use_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return EfficientNetConfig( num_channels=self.num_channels, kernel_sizes=self.kernel_sizes, in_channels=self.in_channels, out_channels=self.out_channels, strides=self.strides, num_block_repeats=self.num_block_repeats, expand_ratios=self.expand_ratios, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = EfficientNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = EfficientNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class EfficientNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = EfficientNetModelTester(self) 
self.config_tester = ConfigTester( self, config_class=EfficientNetConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="EfficientNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientNet does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="EfficientNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states num_blocks = sum(config.num_block_repeats) * 4 self.assertEqual(len(hidden_states), num_blocks) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = EfficientNetModel.from_pretrained(model_name) self.assertIsNotNone(model) @is_pipeline_test @require_vision @slow def test_pipeline_image_classification(self): super().test_pipeline_image_classification() def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class EfficientNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("google/efficientnet-b7") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.2962, 0.4487, 0.4499]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
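A small worked example of the shape arithmetic asserted above, under the tester's default configuration (image_size=32, num_block_repeats=[1, 1, 2]); the numbers are illustrative and follow directly from the checks in create_and_check_model and test_hidden_states_output.

image_size = 32
num_block_repeats = [1, 1, 2]

num_hidden_states = sum(num_block_repeats) * 4  # 16 entries expected in outputs.hidden_states
first_feature_map_hw = (image_size // 2, image_size // 2)  # first hidden state: (16, 16)
last_hidden_state_hw = (image_size // 4, image_size // 4)  # last_hidden_state spatial dims: (8, 8)
print(num_hidden_states, first_feature_map_hw, last_hidden_state_hw)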
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Special case for the ForPreTraining model: _prepare_for_class gives models in MODEL_FOR_PRETRAINING_MAPPING per-token labels of shape (batch_size, seq_length).
import unittest from transformers import ElectraConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.electra.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST class ElectraModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return ElectraConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def 
prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, _, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_electra_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_electra_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ElectraModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_electra_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_electra_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = ElectraForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_electra_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = ElectraForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_electra_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = ElectraForPreTraining(config=config) model.to(torch_device) model.eval() result 
= model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_electra_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = ElectraForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_electra_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_electra_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = ElectraForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ElectraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ElectraModel, ElectraForPreTraining, ElectraForMaskedLM, ElectraForCausalLM, ElectraForMultipleChoice, ElectraForTokenClassification, ElectraForSequenceClassification, ElectraForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ElectraModel, "fill-mask": ElectraForMaskedLM, "question-answering": ElectraForQuestionAnswering, "text-classification": ElectraForSequenceClassification, "text-generation": ElectraForCausalLM, "token-classification": ElectraForTokenClassification, "zero-shot": ElectraForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, 
self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ElectraModelTester(self) self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_electra_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_model(*config_and_inputs) def test_electra_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_electra_model_as_decoder(*config_and_inputs) def test_electra_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_electra_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs) def test_for_pre_training(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ElectraModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_electra_for_causal_lm(*config_and_inputs) @require_torch class ElectraModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = ElectraModel.from_pretrained("google/electra-small-discriminator") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 256)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.4471, 0.6821, -0.3265], [0.4627, 0.5255, -0.3668], [0.4532, 0.3313, -0.4344]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
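A hedged sketch of the ForPreTraining special case handled in _prepare_for_class above: ElectraForPreTraining is a per-token replaced-token discriminator, so its labels form a binary (batch_size, seq_length) tensor rather than one label per sequence. The shapes mirror the tester defaults (batch_size=13, seq_length=7); the all-zeros contents are what the test suite itself uses.

import torch

batch_size, seq_length = 13, 7
# 0 = original token, 1 = replaced token; _prepare_for_class simply uses zeros.
discriminator_labels = torch.zeros((batch_size, seq_length), dtype=torch.long)
assert discriminator_labels.shape == (batch_size, seq_length)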
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Tests for the TensorFlow ELECTRA models. The model tests cover: the base model; the causal-LM base model (is_decoder=True, no cross-attention, no encoder outputs); the base model as a decoder of an encoder-decoder architecture (is_decoder=True, with cross-attention and encoder outputs passed), also checking the case where encoder outputs are not passed; the causal-LM base model with past_key_values, with past_key_values and attention_mask, and with past_key_values and a longer decoder sequence length; and a decoder variant similar to test_causal_lm_base_model_past_with_large_inputs but with cross-attention. Each past-key-values check follows the same pattern: run a first forward pass, create a hypothetical next token and extend it to next_input_ids (appending to the input_ids and attn_mask, in one case changing a random masked slice of input_ids), select a random slice, and test that the outputs are equal for that slice. Further tests cover masked LM, pretraining, sequence classification, multiple choice, question answering and token classification, plus loading from pretrained (for model_name in TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1], here google/electra-small-discriminator) and an integration test that runs TFElectraForPreTraining from lysandre/tiny-electra-random and checks the output shape (1, 6) against an expected logits slice.
from __future__ import annotations import unittest from transformers import ElectraConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.electra.modeling_tf_electra import ( TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, ) class TFElectraModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.embedding_size = 128 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = ElectraConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFElectraModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, 
(self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_causal_lm_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True

        model = TFElectraModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFElectraModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_causal_lm_base_model_past(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFElectraModel(config=config)

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs.past_key_values

        # create a hypothetical next token and extend input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

        output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0]
        output_from_past = model(
            next_tokens, past_key_values=past_key_values, output_hidden_states=True
        ).hidden_states[0]

        # select a random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for the slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)

    def create_and_check_causal_lm_base_model_past_with_attn_mask(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFElectraModel(config=config)

        # create an attention mask that is one for the first half and zero for the second half
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

        # first forward pass
        outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

        # create a hypothetical next token
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        past_key_values = outputs.past_key_values

        # change a random masked slice of input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)

        # append to next input_ids and extend the attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat(
            [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
            axis=1,
        )

        output_from_no_past = model(
            next_input_ids,
            attention_mask=attn_mask,
            output_hidden_states=True,
        ).hidden_states[0]
        output_from_past = model(
            next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True
        ).hidden_states[0]

        # select a random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for the slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)

    def create_and_check_causal_lm_base_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFElectraModel(config=config)

        input_ids = input_ids[:1, :]
        input_mask = input_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens and extend input_ids with them
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            output_hidden_states=True,
        ).hidden_states[0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        ).hidden_states[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select a random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for the slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFElectraModel(config=config)

        input_ids = input_ids[:1, :]
        input_mask = input_mask[:1, :]
        encoder_hidden_states = encoder_hidden_states[:1, :, :]
        encoder_attention_mask = encoder_attention_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens and extend input_ids with them
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        ).hidden_states[0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        ).hidden_states[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select a random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for the slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFElectraForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFElectraForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFElectraForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFElectraForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFElectraForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFElectraForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFElectraModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFElectraModel,
            TFElectraForMaskedLM,
            TFElectraForPreTraining,
            TFElectraForTokenClassification,
            TFElectraForMultipleChoice,
            TFElectraForSequenceClassification,
            TFElectraForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFElectraModel,
            "fill-mask": TFElectraForMaskedLM,
            "question-answering": TFElectraForQuestionAnswering,
            "text-classification": TFElectraForSequenceClassification,
            "token-classification": TFElectraForTokenClassification,
            "zero-shot": TFElectraForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFElectraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_causal_lm_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_causal_lm_base_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model_past(*config_and_inputs)

    def test_causal_lm_base_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model_past_with_attn_mask(*config_and_inputs)

    def test_causal_lm_base_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model_past_large_inputs(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/electra-small-discriminator"]:
            model = TFElectraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFElectraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFElectraForPreTraining.from_pretrained("lysandre/tiny-electra-random")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant([[-0.24651965, 0.8835437, 1.823782]])
        tf.debugging.assert_near(output[:, :3], expected_slice, atol=1e-4)
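# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): every cache-consistency
# check above follows the same pattern -- run one full forward pass over the
# concatenated ids, run one incremental pass over only the new tokens with
# `past_key_values`, then compare a random slice of the hidden states. A
# minimal version of that core comparison, assuming `model` is any TF decoder
# that accepts `past_key_values`, `past` holds the cache from a previous call,
# and `idx` is an arbitrary feature index:
#
#     full = model(tf.concat([input_ids, next_tokens], axis=-1),
#                  output_hidden_states=True).hidden_states[0]
#     step = model(next_tokens, past_key_values=past,
#                  output_hidden_states=True).hidden_states[0]
#     # the cached pass must reproduce the tail of the full pass
#     tf.debugging.assert_near(step[:, 0, idx], full[:, -1, idx], rtol=1e-6)
# ---------------------------------------------------------------------------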
# coding=utf-8
# Copyright 2021-2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the EnCodec feature extractor."""
import itertools
import random
import unittest

import numpy as np

from transformers import EncodecFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class EnCodecFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=24000,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            audio_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            audio_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            audio_inputs = [np.asarray(x) for x in audio_inputs]

        return audio_inputs


@require_torch
class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = EncodecFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = EnCodecFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs]

        # test not batched input
        encoded_sequences_1 = feat_extract(audio_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_audio_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # test batched
        encoded_sequences_1 = feat_extract(audio_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_audio_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_audio_inputs = np.random.rand(100).astype(np.float64)
        py_audio_inputs = np_audio_inputs.tolist()

        for inputs in [py_audio_inputs, np_audio_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        audio_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in audio_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on
        input_audio = self._load_datasamples(1)
        feature_extractor = EncodecFeatureExtractor()
        input_values = feature_extractor(input_audio, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1, 93680))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_stereo(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on
        input_audio = self._load_datasamples(1)
        input_audio = [np.tile(input_audio[0][None], reps=(2, 1))]
        input_audio[0][1] *= 0.5
        feature_extractor = EncodecFeatureExtractor(feature_size=2)
        input_values = feature_extractor(input_audio, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 2, 93680))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
        self.assertTrue(torch.allclose(input_values[0, 1, :30], EXPECTED_INPUT_VALUES * 0.5, atol=1e-6))

    def test_truncation_and_padding(self):
        input_audio = self._load_datasamples(2)
        # would be easier if the stride was like
        feature_extractor = EncodecFeatureExtractor(feature_size=1, chunk_length_s=1, overlap=0.01)

        # pad and trunc raise an error
        with self.assertRaisesRegex(
            ValueError,
            "^Both padding and truncation were set. Make sure you only set one.$",
        ):
            truncated_outputs = feature_extractor(
                input_audio, padding="max_length", truncation=True, return_tensors="pt"
            ).input_values

        # truncate to chunk
        truncated_outputs = feature_extractor(input_audio, truncation=True, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (2, 1, 71520))  # 2 chunks

        # force truncate to max_length
        truncated_outputs = feature_extractor(
            input_audio, truncation=True, max_length=48000, return_tensors="pt"
        ).input_values
        self.assertEqual(truncated_outputs.shape, (2, 1, 48000))

        # pad to chunk
        padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values
        self.assertEqual(padded_outputs.shape, (2, 1, 95280))

        # pad to chunk
        truncated_outputs = feature_extractor(input_audio, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (2, 1, 95280))

        # force pad to max length
        truncated_outputs = feature_extractor(
            input_audio, padding="max_length", max_length=100000, return_tensors="pt"
        ).input_values
        self.assertEqual(truncated_outputs.shape, (2, 1, 100000))

        # force no pad
        with self.assertRaisesRegex(
            ValueError,
            "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$",
        ):
            truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values

        truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (1, 1, 93680))

        # no pad if no chunk_length_s
        feature_extractor.chunk_length_s = None
        with self.assertRaisesRegex(
            ValueError,
            "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$",
        ):
            truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values

        truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (1, 1, 93680))

        # no pad if no overlap
        feature_extractor.chunk_length_s = 2
        feature_extractor.overlap = None
        with self.assertRaisesRegex(
            ValueError,
            "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$",
        ):
            truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values

        truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
        self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
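# ---------------------------------------------------------------------------
# Note (illustrative, not part of the test suite): the shapes asserted above
# are consistent with a stride of chunk_length * (1 - overlap). With
# chunk_length_s=1 at 24 kHz a chunk is 24000 samples and the stride is
# 24000 * 0.99 = 23760, so a ~93680-sample clip truncates down to
# 24000 + 2 * 23760 = 71520 and pads up to 24000 + 3 * 23760 = 95280 -- a
# whole number of overlapping chunks either way. A quick sanity check on
# your own audio, using only the API exercised above:
#
#     extractor = EncodecFeatureExtractor(feature_size=1, chunk_length_s=1, overlap=0.01)
#     out = extractor(raw_audio, sampling_rate=24000, return_tensors="pt")
#     print(out.input_values.shape)  # length rounded to whole chunks
# ---------------------------------------------------------------------------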
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EnCodec model."""
import copy
import inspect
import os
import tempfile
import unittest
from typing import Dict, List, Tuple

import numpy as np
from datasets import Audio, load_dataset

from transformers import AutoProcessor, EncodecConfig
from transformers.testing_utils import (
    is_torch_available,
    require_torch,
    slow,
    torch_device,
)

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EncodecModel


def prepare_inputs_dict(
    config,
    input_ids=None,
    input_values=None,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if input_ids is not None:
        encoder_dict = {"input_ids": input_ids}
    else:
        encoder_dict = {"input_values": input_values}

    decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {}

    return {**encoder_dict, **decoder_dict}


@require_torch
class EncodecModelTester:
    def __init__(
        self,
        parent,
        # `batch_size` needs to be an even number if the model has some outputs with batch dim != 0
        batch_size=12,
        num_channels=2,
        is_training=False,
        intermediate_size=40,
        hidden_size=32,
        num_filters=8,
        num_residual_layers=1,
        upsampling_ratios=[8, 4],
        num_lstm_layers=1,
        codebook_size=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.intermediate_size = intermediate_size
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.num_lstm_layers = num_lstm_layers
        self.codebook_size = codebook_size

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
        config = self.get_config()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_config(self):
        return EncodecConfig(
            audio_channels=self.num_channels,
            chunk_in_sec=None,
            hidden_size=self.hidden_size,
            num_filters=self.num_filters,
            num_residual_layers=self.num_residual_layers,
            upsampling_ratios=self.upsampling_ratios,
            num_lstm_layers=self.num_lstm_layers,
            codebook_size=self.codebook_size,
        )

    def create_and_check_model_forward(self, config, inputs_dict):
        model = EncodecModel(config=config).to(torch_device).eval()

        input_values = inputs_dict["input_values"]
        result = model(input_values)
        self.parent.assertEqual(
            result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size)
        )


@require_torch
class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (EncodecModel,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_resize_embeddings = False
    pipeline_model_mapping = {"feature-extraction": EncodecModel} if is_torch_available() else {}
    input_name = "input_values"

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # model does not have attention and does not support returning hidden states
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if "output_attentions" in inputs_dict:
            inputs_dict.pop("output_attentions")
        if "output_hidden_states" in inputs_dict:
            inputs_dict.pop("output_hidden_states")
        return inputs_dict

    def setUp(self):
        self.model_tester = EncodecModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values", "padding_mask", "bandwidth"]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic")
    def test_torchscript_output_hidden_state(self):
        pass

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # to be sure we have no nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            main_input_name = model_class.main_input_name

            try:
                main_input = inputs[main_input_name]
                model(main_input)
                traced_model = torch.jit.trace(model, main_input)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                if layer_name in loaded_model_state_dict:
                    p2 = loaded_model_state_dict[layer_name]
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

            self.assertTrue(models_equal)

            # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
            # Even with this call, there is still a memory leak of about 0.04MB.
            self.clear_torch_jit_class_registry()

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
    def test_attention_outputs(self):
        pass

    def test_feed_forward_chunking(self):
        (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            config.chunk_length_s = None
            config.overlap = None
            config.sampling_rate = 10

            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10)

            hidden_states_no_chunk = model(**inputs)[0]

            torch.manual_seed(0)
            config.chunk_length_s = 1
            config.overlap = 0
            config.sampling_rate = 10

            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**inputs)[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic")
    def test_hidden_states_output(self):
        pass

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            # outputs are not tensors but a list, since each sequence doesn't have the same frame_length
            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = ["conv"]
                ignore_init = ["lstm"]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    elif not any(x in name for x in ignore_init):
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def test_identity_shortcut(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        config.use_conv_shortcut = False
        self.model_tester.create_and_check_model_forward(config, inputs_dict)


def normalize(arr):
    norm = np.linalg.norm(arr)
    normalized_arr = arr / norm
    return normalized_arr


def compute_rmse(arr1, arr2):
    arr1_normalized = normalize(arr1)
    arr2_normalized = normalize(arr2)
    return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())


@slow
@require_torch
class EncodecIntegrationTest(unittest.TestCase):
    def test_integration_24kHz(self):
        expected_rmse = {
            "1.5": 0.0025,
            "24.0": 0.0015,
        }
        expected_codesums = {
            "1.5": [371955],
            "24.0": [6659962],
        }
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        model_id = "facebook/encodec_24khz"

        model = EncodecModel.from_pretrained(model_id).to(torch_device)
        processor = AutoProcessor.from_pretrained(model_id)

        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        audio_sample = librispeech_dummy[-1]["audio"]["array"]

        inputs = processor(
            raw_audio=audio_sample,
            sampling_rate=processor.sampling_rate,
            return_tensors="pt",
        ).to(torch_device)

        for bandwidth, expected_rmse in expected_rmse.items():
            with torch.no_grad():
                # use max bandwidth for best possible reconstruction
                encoder_outputs = model.encode(inputs["input_values"], bandwidth=float(bandwidth))

                audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]]

                # make sure audio encoded codes are correct
                self.assertListEqual(audio_code_sums, expected_codesums[bandwidth])

                audio_codes, scales = encoder_outputs.to_tuple()
                input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0]
                input_values_enc_dec = model(
                    inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth)
                )[-1]

            # make sure forward and decode give the same result
            self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))

            # make sure shape matches
            self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape)

            arr = inputs["input_values"][0].cpu().numpy()
            arr_enc_dec = input_values_enc_dec[0].cpu().numpy()

            # make sure audios are more or less equal
            # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0
            rmse = compute_rmse(arr, arr_enc_dec)
            self.assertTrue(rmse < expected_rmse)

    def test_integration_48kHz(self):
        expected_rmse = {
            "3.0": 0.001,
            "24.0": 0.0005,
        }
        expected_codesums = {
            "3.0": [144259, 146765, 156435, 176871, 161971],
            "24.0": [1568553, 1294948, 1306190, 1464747, 1663150],
        }
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        model_id = "facebook/encodec_48khz"

        model = EncodecModel.from_pretrained(model_id).to(torch_device)
        model = model.eval()
        processor = AutoProcessor.from_pretrained(model_id)

        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        audio_sample = librispeech_dummy[-1]["audio"]["array"]

        # transform mono to stereo
        audio_sample = np.array([audio_sample, audio_sample])

        inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt").to(
            torch_device
        )

        for bandwidth, expected_rmse in expected_rmse.items():
            with torch.no_grad():
                # use max bandwidth for best possible reconstruction
                encoder_outputs = model.encode(
                    inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth), return_dict=False
                )
                audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]]

                # make sure audio encoded codes are correct
                self.assertListEqual(audio_code_sums, expected_codesums[bandwidth])

                audio_codes, scales = encoder_outputs
                input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0]
                input_values_enc_dec = model(
                    inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth)
                )[-1]

            # make sure forward and decode give the same result
            self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))

            # make sure shape matches
            self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape)

            arr = inputs["input_values"][0].cpu().numpy()
            arr_enc_dec = input_values_enc_dec[0].cpu().numpy()

            # make sure audios are more or less equal
            # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0
            rmse = compute_rmse(arr, arr_enc_dec)
            self.assertTrue(rmse < expected_rmse)

    def test_batch_48kHz(self):
        expected_rmse = {
            "3.0": 0.001,
            "24.0": 0.0005,
        }
        expected_codesums = {
            "3.0": [
                [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842],
                [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241],
            ],
            "24.0": [
                [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842],
                [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241],
            ],
        }
        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        model_id = "facebook/encodec_48khz"

        model = EncodecModel.from_pretrained(model_id).to(torch_device)
        processor = AutoProcessor.from_pretrained(model_id, chunk_length_s=1, overlap=0.01)

        librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))

        audio_samples = [
            np.array([audio_sample["array"], audio_sample["array"]])
            for audio_sample in librispeech_dummy[-2:]["audio"]
        ]

        inputs = processor(raw_audio=audio_samples, sampling_rate=processor.sampling_rate, return_tensors="pt")
        input_values = inputs["input_values"].to(torch_device)
        for bandwidth, expected_rmse in expected_rmse.items():
            with torch.no_grad():
                # use max bandwidth for best possible reconstruction
                encoder_outputs = model.encode(input_values, bandwidth=float(bandwidth), return_dict=False)
                audio_code_sums_0 = [a[0][0].sum().cpu().item() for a in encoder_outputs[0]]
                audio_code_sums_1 = [a[0][1].sum().cpu().item() for a in encoder_outputs[0]]

                # make sure audio encoded codes are correct
                self.assertListEqual(audio_code_sums_0, expected_codesums[bandwidth][0])
                self.assertListEqual(audio_code_sums_1, expected_codesums[bandwidth][1])

                audio_codes, scales = encoder_outputs
                input_values_dec = model.decode(audio_codes, scales)[0]
                input_values_enc_dec = model(input_values, bandwidth=float(bandwidth))[-1]

            # make sure forward and decode give the same result
            self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))

            # make sure shape matches
            self.assertTrue(input_values.shape == input_values_enc_dec.shape)

            arr = input_values[0].cpu().numpy()
            arr_enc_dec = input_values_enc_dec[0].cpu().numpy()

            # make sure audios are more or less equal
            # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0
            rmse = compute_rmse(arr, arr_enc_dec)
            self.assertTrue(rmse < expected_rmse)
# coding=utf-8
# Copyright 2022 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os import tempfile import unittest from transformers import ErnieConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ) from transformers.models.ernie.modeling_ernie import ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST class ErnieModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return ErnieConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def 
prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ErnieModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = ErnieForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_model_for_causal_lm_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ErnieForCausalLM(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = ErnieForCausalLM(config=config).to(torch_device).eval() outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ErnieForSequenceClassification(config) model.to(torch_device) 
model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ErnieForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = ErnieForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ErnieModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ErnieModel, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (ErnieForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": ErnieModel, "fill-mask": ErnieForMaskedLM, "question-answering": ErnieForQuestionAnswering, "text-classification": ErnieForSequenceClassification, "text-generation": ErnieForCausalLM, "token-classification": ErnieForTokenClassification, "zero-shot": ErnieForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ErnieModelTester(self) self.config_tester = ConfigTester(self, config_class=ErnieConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ErnieModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class 
== ErnieForMultipleChoice:
                # ErnieForMultipleChoice behaves incorrectly in JIT environments.
                return

            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "ernie.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "ernie.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
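# test_torchscript_device_change above traces the model on CPU, serializes the trace, and
# reloads it with map_location so the weights land on the target device. The same
# trace/save/load round trip on a toy module, to make the pattern explicit; the one-layer
# module, tensor sizes, and file name are illustrative assumptions, not part of the suite.
import os
import tempfile

import torch


class _TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


_model = _TinyNet().eval()
_example = torch.randn(3, 4)

# Tracing records the ops executed on a concrete example input.
_traced = torch.jit.trace(_model, _example)

with tempfile.TemporaryDirectory() as _tmp:
    _path = os.path.join(_tmp, "tiny.pt")
    torch.jit.save(_traced, _path)
    # map_location relocates the serialized weights on load (torch_device in the real
    # test; "cpu" here so the sketch runs anywhere).
    _loaded = torch.jit.load(_path, map_location="cpu")
    torch.testing.assert_close(_loaded(_example), _model(_example))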
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. and Baidu team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ErnieM model. """
import unittest from transformers import ErnieMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, ErnieMForSequenceClassification, ErnieMForTokenClassification, ErnieMModel, ) from transformers.models.ernie_m.modeling_ernie_m import ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST class ErnieMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_uiem(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_ids, input_mask def get_config(self): return ErnieMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = ErnieMModel(config=config) model.to(torch_device) model.eval() result = 
model(input_ids, return_dict=True) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieMForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_information_extraction( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ErnieMForInformationExtraction(config=config) model.to(torch_device) model.eval() sequence_labels = torch.ones_like(input_ids, dtype=torch.float32) result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ErnieMForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ErnieMForTokenClassification(config=config) model.to(torch_device) model.eval() input_ids.to(torch_device) input_mask.to(torch_device) token_labels.to(torch_device) result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = ErnieMForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ErnieMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ErnieMModel, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, ErnieMForSequenceClassification, ErnieMForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": ErnieMModel, "question-answering": ErnieMForQuestionAnswering, "text-classification": 
ErnieMForSequenceClassification, "token-classification": ErnieMForTokenClassification, "zero-shot": ErnieMForSequenceClassification, } if is_torch_available() else {} ) test_torchscript = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests": return True return False def setUp(self): self.model_tester = ErnieMModelTester(self) self.config_tester = ConfigTester(self, config_class=ErnieMConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_information_extraction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_information_extraction(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ErnieMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class ErnieMModelIntegrationTest(unittest.TestCase): @slow def test_inference_model(self): model = ErnieMModel.from_pretrained("susnato/ernie-m-base_pytorch") model.eval() input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] hidden_size = 768 expected_shape = torch.Size((1, 6, hidden_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0012, 0.1245, -0.0214], [-0.0742, 0.0244, -0.0771], [-0.0333, 0.1164, -0.1554]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. and Baidu team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the ErnieM tokenizer. """
import unittest from transformers import ErnieMTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class ErnieMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = ErnieMTokenizer test_seq2seq = False test_sentencepiece = True test_rust_tokenizer = False test_sentencepiece_ignore_case = False def setUp(self): super().setUp() tokenizer = ErnieMTokenizer(SAMPLE_VOCAB, unk_token="<unk>", pad_token="<pad>") tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_convert_token_and_id(self): token = "<pad>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<pad>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "▁eloquent") self.assertEqual(len(vocab_keys), 30_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 30_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_full_tokenizer(self): tokenizer = ErnieMTokenizer(SAMPLE_VOCAB, do_lower_case=True, unk_token="<unk>", pad_token="<pad>") tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, ["▁i", "▁was", "▁born", "▁in", "9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [31, 23, 386, 19, 518, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, ["▁i", "▁was", "▁born", "▁in", "9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], ) def test_sequence_builders(self): tokenizer = ErnieMTokenizer(SAMPLE_VOCAB, unk_token="<unk>", pad_token="<pad>") text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + [ tokenizer.sep_token_id ] + text_2 + [tokenizer.sep_token_id] @slow def test_tokenizer_integration(self): expected_encoding = 
{'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 9, 304, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 5, 5, 5, 16, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 6460, 1328, 4589, 42, 122009, 115774, 23, 3559, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="susnato/ernie-m-base_pytorch", sequences=[ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over32+ pretrained " "models in100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ], )
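# test_sequence_builders above pins down the ErnieM special-token layout: a single
# sequence is wrapped as [CLS] A [SEP], and a pair as [CLS] A [SEP] [SEP] B [SEP],
# with two separators between the segments. A plain-Python sketch of that layout;
# the token ids below are made up for illustration, not the real vocabulary.
_CLS, _SEP = 0, 2


def _build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    # Mirrors the layout asserted in test_sequence_builders.
    if token_ids_1 is None:
        return [_CLS] + token_ids_0 + [_SEP]
    return [_CLS] + token_ids_0 + [_SEP] + [_SEP] + token_ids_1 + [_SEP]


assert _build_inputs_with_special_tokens([11, 12, 13]) == [0, 11, 12, 13, 2]
assert _build_inputs_with_special_tokens([11, 12, 13], [21, 22]) == [0, 11, 12, 13, 2, 2, 21, 22, 2]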
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ESM model. """
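# The ESM suite below (test_create_position_ids_respects_padding_index and its
# inputs_embeds counterpart) checks RoBERTa-style position ids: padding slots keep
# padding_idx, and real tokens count up from padding_idx + 1. A minimal
# reimplementation of that idea for illustration; the library function may differ in
# details such as past_key_values_length handling.
import torch


def _create_position_ids_from_input_ids(input_ids, padding_idx):
    # 1 where there is a real token, 0 at padding positions.
    mask = input_ids.ne(padding_idx).int()
    # Cumulative count of real tokens gives 1, 2, 3, ...; multiplying by the mask
    # zeroes the padding slots back out.
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    # Shift so real positions start at padding_idx + 1 and padding stays at padding_idx.
    return incremental_indices.long() + padding_idx


_padding_idx = 1
_input_ids = torch.tensor([[12, 31, 13, _padding_idx]])
assert torch.equal(
    _create_position_ids_from_input_ids(_input_ids, _padding_idx),
    torch.tensor([[_padding_idx + 1, _padding_idx + 2, _padding_idx + 3, _padding_idx]]),
)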
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_forward_and_backwards(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        gradient_checkpointing=False,
    ):
        model = EsmForMaskedLM(config)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        model.to(torch_device)
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_esm_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    # Copied from tests.test_modeling_roberta
    def test_create_position_ids_respects_padding_index(self):
        """Ensure that the default position ids only assign a sequential position to non-padding tokens.

        This is a regression test for https://github.com/huggingface/transformers/issues/1761.

        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is EsmEmbeddings.padding_idx + 1.
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    # Copied from tests.test_modeling_roberta
    def test_create_position_ids_from_inputs_embeds(self):
        """Ensure that the default position ids only assign a sequential position to non-padding tokens.

        This is a regression test for https://github.com/huggingface/transformers/issues/1761.

        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is EsmEmbeddings.padding_idx + 1.
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]

            # compare the actual values for a slice
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
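# Illustrative sketch of the padding-aware position-id scheme the two regression
# tests above pin down. The standalone helper below is hypothetical (the library
# helper is create_position_ids_from_input_ids, imported at the top of the file):
# each non-padding token gets a sequential position offset by padding_idx + 1,
# and padding tokens keep padding_idx itself.
import torch


def sketch_create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    # running count of non-padding tokens; multiplying by the mask zeroes the
    # counter back out at padding positions
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


# With padding_idx == 1, [[12, 31, 13, 1]] maps to [[2, 3, 4, 1]], matching the
# expected tensor in test_create_position_ids_respects_padding_index above.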
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ESM model. """
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        esmfold_config = {
            "trunk": {
                "num_blocks": 2,
                "sequence_state_dim": 64,
                "pairwise_state_dim": 16,
                "sequence_head_width": 4,
                "pairwise_head_width": 4,
                "position_bins": 4,
                "chunk_size": 16,
                "structure_module": {
                    "ipa_dim": 16,
                    "num_angles": 7,
                    "num_blocks": 2,
                    "num_heads_ipa": 4,
                    "pairwise_dim": 16,
                    "resnet_dim": 16,
                    "sequence_dim": 48,
                },
            },
            "fp16_esm": False,
            "lddt_head_hid_dim": 16,
        }
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config=esmfold_config,
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (2, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (2, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]

        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
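# Hedged usage sketch mirroring the slow integration test above; it is not part
# of the test suite. Running it assumes network access to the public
# facebook/esmfold_v1 checkpoint (a large download) and enough memory for the
# full model.
import torch

from transformers import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
with torch.no_grad():
    positions = model(input_ids)["positions"]
# Per the shape checks in create_and_check_model above, the trailing dimensions
# are (seq_length, 14, 3): 14 atom slots with xyz coordinates per residue.
print(positions.shape)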
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


# Copied from tests.test_modeling_tf_roberta
class TFEsmModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        """Test the base model"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """Test the base model as a decoder (of an encoder-decoder architecture)

        is_decoder=True + cross_attention + pass encoder outputs
        """
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
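# Hedged usage sketch mirroring the slow TF integration test above; not part of
# the suite. Assumes network access to the public facebook/esm2_t6_8M_UR50D
# checkpoint.
import tensorflow as tf

from transformers import TFEsmModel

model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
last_hidden_state = model(input_ids)[0]
# Shape is (batch_size, sequence_length, hidden_size); the leading values of
# the first rows should match expected_slice in test_inference_no_head above.
print(last_hidden_state.shape)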
# coding=utf-8
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from typing import List

from transformers.models.esm.tokenization_esm import VOCAB_FILES_NAMES, EsmTokenizer
from transformers.testing_utils import require_tokenizers
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


@require_tokenizers
class ESMTokenizationTest(unittest.TestCase):
    tokenizer_class = EsmTokenizer

    def setUp(self):
        super().setUp()
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens: List[str] = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>"]  # fmt: skip
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        return [self.get_tokenizer(**kwargs)]

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_single_example(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("LAGVS")
        self.assertListEqual(tokens, ["L", "A", "G", "V", "S"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4, 5, 6, 7, 8])

    def test_tokenizer_encode_single(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq = "LAGVS"
        self.assertListEqual(tokenizer.encode(seq), [0, 4, 5, 6, 7, 8, 2])

    def test_tokenizer_call_no_pad(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ["LAGVS", "WCB"]
        tokens_batch = tokenizer(seq_batch, padding=False)["input_ids"]
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2]])

    def test_tokenizer_call_pad(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ["LAGVS", "WCB"]
        tokens_batch = tokenizer(seq_batch, padding=True)["input_ids"]
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2, 1, 1]])

    def test_tokenize_special_tokens(self):
        """Test `tokenize` with special tokens."""
        tokenizers = self.get_tokenizers(fast=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = "<unk>"
                SPECIAL_TOKEN_2 = "<mask>"

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
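# Hedged round-trip example of the behaviour the tests above pin down, shown
# against the public checkpoint (assumed to ship the same 33-token vocabulary
# as the list in setUp).
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
print(tokenizer.tokenize("LAGVS"))  # ['L', 'A', 'G', 'V', 'S']
print(tokenizer.encode("LAGVS"))  # [0, 4, 5, 6, 7, 8, 2] -> <cls> L A G V S <eos>
print(tokenizer(["LAGVS", "WCB"], padding=True)["input_ids"])  # shorter row padded with id 1 (<pad>)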
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Falcon model. """
import unittest from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, FalconConfig, is_torch_available, set_seed, ) from transformers.testing_utils import require_bitsandbytes, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class FalconModelTester: def __init__( self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return FalconConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = FalconModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FalconModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) 
all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FalconModel, "question-answering": FalconForQuestionAnswering, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: config.alibi = alibi self.model_tester.create_and_check_model(config, *inputs) def test_falcon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_past_key_values_format(self): for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not hasattr(config, "use_cache"): return model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) if "past_key_values" not in 
outputs: return num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) batch_size, seq_length = inputs["input_ids"].shape for i in range(num_hidden_layers): if config.new_decoder_architecture: num_attention_heads = config.num_attention_heads elif config.multi_query: num_attention_heads = 1 self.assertEqual(len(past_kv[0]), 2) self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) original_model = FalconModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = FalconModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0) @require_bitsandbytes @slow def test_batched_generation(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "tiiuae/falcon-7b", device_map="auto", load_in_4bit=True, ) test_text = "A sequence: 1, 2" unpadded_inputs = tokenizer([test_text], return_tensors="pt").to("cuda:0") unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20) unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True) dummy_text = "This is a longer text " * 2 padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to("cuda:0") padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20) padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True) expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, " self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) self.assertEqual(unpadded_gen_text[0], expected_output) self.assertEqual(padded_gen_text[0], expected_output)
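# test_batched_generation above relies on left padding so that batching does not change a prompt's continuation.
# Below is a minimal standalone sketch of the same pattern, reusing the small Rocketknight1/falcon-rw-1b
# checkpoint from the tests above so it runs without bitsandbytes; the prompt strings are illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token  # Falcon has no dedicated pad token
model = AutoModelForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")

# Left padding keeps each prompt's last real token adjacent to the newly generated tokens,
# so shorter prompts in a batch produce the same continuation as when run unbatched.
batch = tokenizer(
    ["A sequence: 1, 2", "This is a much longer prompt that forces padding"],
    return_tensors="pt",
    padding=True,
)
output_ids = model.generate(**batch, do_sample=False, max_new_tokens=10)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))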
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test notes:
# - input_lengths introduce a small variation of seq_length.
# - TODO: fix the failed tests. QAPipelineTests fails for a few models when the slower tokenizers are used (the
#   slower tokenizers were never used for pipeline tests before the pipeline testing rework); check and possibly
#   fix QAPipelineTests with the slower tokenizers.
# - Flaubert has 2 QA models; the correct labels need to be set manually for one of them in _prepare_for_class.
# - FlaubertForMultipleChoice behaves incorrectly in JIT environments, so test_torchscript_device_change skips it.
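# One note above concerns FlauBERT's two QA heads: FlaubertForQuestionAnswering expects extra label tensors,
# which _prepare_for_class below fills with zeros. A minimal sketch of those label shapes (batch size 13 matches
# the tester default; the standalone form is illustrative):
import torch

batch_size = 13  # FlaubertModelTester default
start_positions = torch.zeros(batch_size, dtype=torch.long)
end_positions = torch.zeros(batch_size, dtype=torch.long)
print(start_positions.shape, end_positions.shape)  # torch.Size([13]) torch.Size([13])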
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class FlaubertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, 
dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = 
FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): 
self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class == FlaubertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
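# test_torchscript_device_change above exercises a general TorchScript pattern: trace on CPU, serialize, then
# reload the traced graph directly onto another device. Below is a minimal standalone sketch with a generic
# module; the nn.Linear stand-in is illustrative, not Flaubert-specific.
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2).eval()
example_input = torch.zeros(1, 4)
traced_model = torch.jit.trace(model, (example_input,))  # traced on CPU

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced_model, path)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    loaded = torch.jit.load(path, map_location=device)  # parameters land on `device` at load time
    loaded(example_input.to(device))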
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test notes:
# - input_lengths introduce a small variation of seq_length.
# - TODO (PVP): check for other models whether language generation is also applicable.
# - TODO: fix the failed tests. QAPipelineTests fails for a few models when the slower tokenizers are used (the
#   slower tokenizers were never used for pipeline tests before the pipeline testing rework); check and possibly
#   fix QAPipelineTests with the slower tokenizers.
# - The integration test encodes "J'aime flaubert !" and compares the actual values for a slice of the output.
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class TFFlaubertModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = True self.scope = None self.bos_token_id = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertModel(config=config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 
self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertWithLMHeadModel(config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertForSequenceClassification(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = TFFlaubertForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = TFFlaubertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) pipeline_model_mapping = 
( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def setUp(self): self.model_tester = TFFlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFFlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf @require_sentencepiece @require_tokenizers class TFFlaubertModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased") input_ids = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, ) output = model(input_ids)[0] expected_shape = tf.TensorShape((1, 8, 512)) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ], dtype=tf.float32, ) self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# coding=utf-8
# Copyright 2022 Meta Platforms authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test notes: each call test initializes the image processor, creates random inputs (PIL images, NumPy arrays or
# PyTorch tensors), and checks non-batched and batched inputs, with and without bool_masked_pos; the masking
# behaviour and the codebook pixel values are tested separately.
import random import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): import PIL from transformers import FlavaImageProcessor from transformers.image_utils import PILImageResampling from transformers.models.flava.image_processing_flava import ( FLAVA_CODEBOOK_MEAN, FLAVA_CODEBOOK_STD, FLAVA_IMAGE_MEAN, FLAVA_IMAGE_STD, ) else: FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None class FlavaImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, resample=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=FLAVA_IMAGE_MEAN, image_std=FLAVA_IMAGE_STD, input_size_patches=14, total_mask_patches=75, mask_group_max_patches=None, mask_group_min_patches=16, mask_group_min_aspect_ratio=0.3, mask_group_max_aspect_ratio=None, codebook_do_resize=True, codebook_size=None, codebook_resample=None, codebook_do_center_crop=True, codebook_crop_size=None, codebook_do_map_pixels=True, codebook_do_normalize=True, codebook_image_mean=FLAVA_CODEBOOK_MEAN, codebook_image_std=FLAVA_CODEBOOK_STD, ): size = size if size is not None else {"height": 224, "width": 224} crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112} codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.do_resize = do_resize self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.min_resolution = min_resolution self.max_resolution = max_resolution self.size = size self.resample = resample if resample is not None else PILImageResampling.BICUBIC self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_center_crop = do_center_crop self.crop_size = crop_size self.input_size_patches = input_size_patches self.total_mask_patches = total_mask_patches self.mask_group_max_patches = mask_group_max_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio self.codebook_do_resize = codebook_do_resize self.codebook_size = codebook_size self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.LANCZOS self.codebook_do_center_crop = codebook_do_center_crop self.codebook_crop_size = codebook_crop_size self.codebook_do_map_pixels = codebook_do_map_pixels self.codebook_do_normalize = codebook_do_normalize self.codebook_image_mean = codebook_image_mean self.codebook_image_std = codebook_image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "resample": self.resample, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "input_size_patches": self.input_size_patches, "total_mask_patches": 
            self.total_mask_patches,
            "mask_group_max_patches": self.mask_group_max_patches,
            "mask_group_min_patches": self.mask_group_min_patches,
            "mask_group_min_aspect_ratio": self.mask_group_min_aspect_ratio,
            "mask_group_max_aspect_ratio": self.mask_group_max_aspect_ratio,
            "codebook_do_resize": self.codebook_do_resize,
            "codebook_size": self.codebook_size,
            "codebook_resample": self.codebook_resample,
            "codebook_do_center_crop": self.codebook_do_center_crop,
            "codebook_crop_size": self.codebook_crop_size,
            "codebook_do_map_pixels": self.codebook_do_map_pixels,
            "codebook_do_normalize": self.codebook_do_normalize,
            "codebook_image_mean": self.codebook_image_mean,
            "codebook_image_std": self.codebook_image_std,
        }

    def get_expected_image_size(self):
        return (self.size["height"], self.size["width"])

    def get_expected_mask_size(self):
        return (
            (self.input_size_patches, self.input_size_patches)
            if not isinstance(self.input_size_patches, tuple)
            else self.input_size_patches
        )

    def get_expected_codebook_image_size(self):
        return (self.codebook_size["height"], self.codebook_size["width"])

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class FlavaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = FlavaImageProcessor if is_vision_available() else None
    maxDiff = None

    def setUp(self):
        self.image_processor_tester = FlavaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "masking_generator"))
        self.assertTrue(hasattr(image_processing, "codebook_do_resize"))
        self.assertTrue(hasattr(image_processing, "codebook_size"))
        self.assertTrue(hasattr(image_processing, "codebook_resample"))
        self.assertTrue(hasattr(image_processing, "codebook_do_center_crop"))
        self.assertTrue(hasattr(image_processing, "codebook_crop_size"))
        self.assertTrue(hasattr(image_processing, "codebook_do_map_pixels"))
        self.assertTrue(hasattr(image_processing, "codebook_do_normalize"))
        self.assertTrue(hasattr(image_processing, "codebook_image_mean"))
        self.assertTrue(hasattr(image_processing, "codebook_image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.codebook_size, {"height": 112, "width": 112})
        self.assertEqual(image_processor.codebook_crop_size, {"height": 112, "width": 112})

        image_processor =
self.image_processing_class.from_dict( self.image_processor_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) self.assertEqual(image_processor.codebook_size, {"height": 33, "width": 33}) self.assertEqual(image_processor.codebook_crop_size, {"height": 66, "width": 66}) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, PIL.Image.Image) encoded_images = image_processing(image_inputs[0], return_tensors="pt") self.assertFalse("bool_masked_pos" in encoded_images) expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) encoded_images = image_processing(image_inputs, return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertFalse("bool_masked_pos" in encoded_images) self.assertEqual( encoded_images.pixel_values.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def _test_call_framework(self, instance_class, prepare_kwargs): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, **prepare_kwargs) for image in image_inputs: self.assertIsInstance(image, instance_class) encoded_images = image_processing(image_inputs[0], return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width = self.image_processor_tester.get_expected_mask_size() self.assertEqual( encoded_images.bool_masked_pos.shape, ( self.image_processor_tester.batch_size, expected_height, expected_width, ), ) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width = self.image_processor_tester.get_expected_mask_size() self.assertEqual( encoded_images.bool_masked_pos.shape, ( self.image_processor_tester.batch_size, expected_height, expected_width, ), ) def 
test_call_numpy(self): self._test_call_framework(np.ndarray, prepare_kwargs={"numpify": True}) def test_call_numpy_4_channels(self): self.image_processing_class.num_channels = 4 self._test_call_framework(np.ndarray, prepare_kwargs={"numpify": True}) self.image_processing_class.num_channels = 3 def test_call_pytorch(self): self._test_call_framework(torch.Tensor, prepare_kwargs={"torchify": True}) def test_masking(self): random.seed(1234) image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) encoded_images = image_processing(image_inputs[0], return_image_mask=True, return_tensors="pt") self.assertEqual(encoded_images.bool_masked_pos.sum().item(), 75) def test_codebook_pixels(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, PIL.Image.Image) encoded_images = image_processing(image_inputs[0], return_codebook_pixels=True, return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size() self.assertEqual( encoded_images.codebook_pixel_values.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) encoded_images = image_processing(image_inputs, return_codebook_pixels=True, return_tensors="pt") expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size() self.assertEqual( encoded_images.codebook_pixel_values.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
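# The behaviours checked above (patch masks via return_image_mask, dVAE inputs via return_codebook_pixels) can be
# seen end to end with a real checkpoint. A minimal sketch, assuming the facebook/flava-full checkpoint with its
# default 224/112 image sizes; the shapes in the comments are expectations under those defaults.
import numpy as np
from PIL import Image

from transformers import FlavaImageProcessor

processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

outputs = processor(image, return_image_mask=True, return_codebook_pixels=True, return_tensors="pt")
print(outputs.pixel_values.shape)           # (1, 3, 224, 224)
print(outputs.bool_masked_pos.shape)        # (1, 14, 14): one flag per image patch
print(outputs.codebook_pixel_values.shape)  # (1, 3, 112, 112): input for the dVAE codebook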
codingutf8 2022 meta platforms s and the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch flava model
places4 coding utf 8 2022 meta platforms s and the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch flava model expected sequence length num_patches 1 we add 1 for the cls token here we also overwrite some of the tests of test_modeling_common py as flava does not use input_ids inputs_embeds attention_mask and seq_length flava does not use inputs_embeds signature parameters is an ordereddict so arg_names order is deterministic in flava the seq_len equals the number of patches 1 we add 1 for the cls token check that output_attentions also work using config check attention is always last and order is fine flava has a different seq_length check that output_hidden_states also work using config skip this test as flavaimagemodel has no base class and is not available in model_mapping skip this test as flavaimagemodel has no base class and is not available in model_mapping flava does not use inputs_embeds skip this test as flavatextmodel has no base class and is not available in model_mapping skip this test as flavatextmodel has no base class and is not available in model_mapping signature parameters is an ordereddict so arg_names order is deterministic no embedding in multimodal model flava does not use inputs_embeds skip this test as flavamultimodalmodel has no base class and is not available in model_mapping skip this test as flavamultimodalmodel has no base class and is not available in model_mapping signature parameters is an ordereddict so arg_names order is deterministic no embedding in multimodal model no attentions flava does not use inputs_embeds skip this test as flavaimagecodebook has no base class and is not available in model_mapping skip this test as flavaimagecodebook has no base class and is not available in model_mapping hidden_states are tested in individual model tests input_embeds are tested in individual model tests tested in individual model tests flavamodel does not have input output embeddings override as the logit_scale parameter initilization is different for flava check if logit_scale is initilized as per the original implementation to be sure we have no nan flava needs pixel_values for pretraining non persistent buffers won t be in original state dict save flavaconfig and check if we can load flavaimageconfig from it save flavaconfig and check if we can load flavatextconfig from it save flavaconfig and check if we can load flavamultimodalconfig from it overwrite from common since flavamodel tfflavamodel return flavaoutput tfflavaoutput we will verify our results on an image of cute cats forward pass verify the embeddings forward pass verify the logits
""" Testing suite for the PyTorch FLAVA model. """

import inspect
import os
import random
import tempfile
import unittest

import numpy as np
import requests

from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FlavaForPreTraining,
        FlavaImageCodebook,
        FlavaImageModel,
        FlavaModel,
        FlavaMultimodalModel,
        FlavaTextModel,
    )
    from transformers.models.flava.modeling_flava import (
        FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST,
        FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
    )
else:
    FlavaModel = None
    FlavaForPreTraining = None
    torch = {}


if is_vision_available():
    from PIL import Image

    from transformers import FlavaProcessor


class FlavaImageModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        qkv_bias=True,
        mask_token=True,
        vocab_size=99,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.mask_token = mask_token
        self.vocab_size = vocab_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        num_patches = self.image_size // self.patch_size
        bool_masked_pos = (
            torch.rand((self.batch_size, num_patches, num_patches), device=pixel_values.device) < 0.9
        ).long()
        config = self.get_config()
        return config, pixel_values, bool_masked_pos

    def get_config(self):
        return FlavaImageConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            qkv_bias=self.qkv_bias,
            mask_token=self.mask_token,
            vocab_size=self.vocab_size,
        )

    def create_and_check_model(self, config, pixel_values, bool_masked_pos):
        model = FlavaImageModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values, bool_masked_pos)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, bool_masked_pos = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos}
        return config, inputs_dict


@require_torch
class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as FLAVA does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (FlavaImageModel,) if is_torch_available() else ()
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = FlavaImageModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlavaImageConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # FLAVA does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in FLAVA, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # FLAVA has a different seq_length
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
            num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
            seq_length = num_patches + 1

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # skip this test as FlavaImageModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_from_base(self):
        pass

    # skip this test as FlavaImageModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlavaImageModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class FlavaTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        vocab_size=102,
        type_vocab_size=2,
        max_position_embeddings=512,
        position_embedding_type="absolute",
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        qkv_bias=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.seq_length = seq_length
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.position_embedding_type = position_embedding_type
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.pad_token_id = pad_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        token_type_ids = None

        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask

    def get_config(self):
        return FlavaTextConfig(
            vocab_size=self.vocab_size,
            type_vocab_size=self.type_vocab_size,
            max_position_embeddings=self.max_position_embeddings,
            position_embedding_type=self.position_embedding_type,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            pad_token_id=self.pad_token_id,
            qkv_bias=self.qkv_bias,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask):
        model = FlavaTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlavaTextModel,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = FlavaTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlavaTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_inputs_embeds(self):
        # FLAVA does not use inputs_embeds
        pass

    # skip this test as FlavaTextModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_from_base(self):
        pass

    # skip this test as FlavaTextModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlavaTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class FlavaMultimodalModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=44,
        use_input_mask=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        qkv_bias=True,
        ce_ignore_index=-100,
        use_cls_token=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.use_input_mask = use_input_mask
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.ce_ignore_index = ce_ignore_index
        self.use_cls_token = use_cls_token

    def prepare_config_and_inputs(self):
        hidden_states = floats_tensor([self.batch_size, self.seq_length - 1, self.hidden_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, hidden_states, input_mask

    def get_config(self):
        return FlavaMultimodalConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            qkv_bias=self.qkv_bias,
            use_cls_token=self.use_cls_token,
            ce_ignore_index=self.ce_ignore_index,
        )

    def create_and_check_model(self, config, hidden_states, input_mask):
        model = FlavaMultimodalModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(hidden_states, attention_mask=input_mask)
            result = model(hidden_states)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, hidden_states, input_mask = config_and_inputs
        inputs_dict = {"hidden_states": hidden_states, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlavaMultimodalModel,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_resize_embeddings = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = FlavaMultimodalModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=FlavaMultimodalConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["hidden_states"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model_common_attributes(self):
        # No embedding in multimodal model
        pass

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_inputs_embeds(self):
        # FLAVA does not use inputs_embeds
        pass

    # skip this test as FlavaMultimodalModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_from_base(self):
        pass

    # skip this test as FlavaMultimodalModel has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlavaMultimodalModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class FlavaImageCodebookTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=112,
        num_channels=3,
        hidden_size=32,
        num_groups=2,
        vocab_size=99,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.num_groups = num_groups
        self.vocab_size = vocab_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return FlavaImageCodebookConfig(
            hidden_size=self.hidden_size, num_groups=self.num_groups, vocab_size=self.vocab_size
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlavaImageCodebook(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.shape, (self.batch_size, config.vocab_size, self.image_size // 8, self.image_size // 8)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlavaImageCodebook,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_resize_embeddings = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FlavaImageCodebookTester(self)
        self.config_tester = ConfigTester(self, config_class=FlavaImageCodebookConfig, has_text_modality=False)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="Flava does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_model_common_attributes(self):
        # No embedding in multimodal model
        pass

    def test_training(self):
        pass

    def test_hidden_states_output(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        # no attentions
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_inputs_embeds(self):
        # FLAVA does not use inputs_embeds
        pass

    def test_model_outputs_equivalence(self):
        pass

    # skip this test as FlavaImageCodebook has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_from_base(self):
        pass

    # skip this test as FlavaImageCodebook has no base class and is not available in MODEL_MAPPING
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlavaImageCodebook.from_pretrained(model_name)
            self.assertIsNotNone(model)


class FlavaModelTester:
    model_class = FlavaModel

    def __init__(
        self,
        parent,
        text_kwargs=None,
        image_kwargs=None,
        multimodal_kwargs=None,
        image_codebook_kwargs=None,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
    ):
        if text_kwargs is None:
            text_kwargs = {}
        if image_kwargs is None:
            image_kwargs = {}
        if multimodal_kwargs is None:
            multimodal_kwargs = {}
        if image_codebook_kwargs is None:
            image_codebook_kwargs = {}

        self.parent = parent
        self.image_model_tester = FlavaImageModelTester(parent, **image_kwargs)
        self.text_model_tester = FlavaTextModelTester(parent, **text_kwargs)
        self.multimodal_model_tester = FlavaMultimodalModelTester(parent, **multimodal_kwargs)
        self.image_codebook_tester = FlavaImageCodebookTester(parent, **image_codebook_kwargs)
        self.is_training = is_training
        self.config_tester = ConfigTester(self, config_class=FlavaConfig, hidden_size=37)
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def test_config(self):
        self.config_tester.run_common_tests()

    def prepare_config_and_inputs_for_common(self):
        _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs()
        _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "bool_masked_pos": bool_masked_pos,
        }

    def get_config(self):
        return FlavaConfig.from_configs(
            self.image_model_tester.get_config(),
            self.text_model_tester.get_config(),
            self.multimodal_model_tester.get_config(),
            self.image_codebook_tester.get_config(),
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
        )

    def create_and_check_model(self, config, inputs):
        self._test_model(config, inputs, test_image=True)
        self._test_model(config, inputs, test_text=True)
        self._test_model(config, inputs, test_image=True, test_text=True)

    def _test_model(self, config, inputs, test_image=False, test_text=False):
        model = self.model_class(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(
                input_ids=inputs["input_ids"] if test_text else None,
                attention_mask=inputs["attention_mask"] if test_text else None,
                token_type_ids=inputs["token_type_ids"] if test_text else None,
                pixel_values=inputs["pixel_values"] if test_image else None,
                bool_masked_pos=inputs["bool_masked_pos"] if test_image else None,
            )
        image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size)
        patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        if test_image:
            self.parent.assertEqual(
                result.image_embeddings.shape,
                (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size),
            )
        else:
            self.parent.assertIsNone(result.image_embeddings)

        if test_text:
            self.parent.assertEqual(
                result.text_embeddings.shape,
                (
                    self.text_model_tester.batch_size,
                    self.text_model_tester.seq_length,
                    self.text_model_tester.hidden_size,
                ),
            )
        else:
            self.parent.assertIsNone(result.text_embeddings)

        if test_image and test_text:
            self.parent.assertEqual(
                result.multimodal_embeddings.shape,
                (
                    self.multimodal_model_tester.batch_size,
                    self.text_model_tester.seq_length + num_patches + 2,
                    self.multimodal_model_tester.hidden_size,
                ),
            )
        else:
            self.parent.assertIsNone(result.multimodal_embeddings)


@require_torch
class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (FlavaModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": FlavaModel} if is_torch_available() else {}
    class_for_tester = FlavaModelTester
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = self.class_for_tester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # hidden_states are tested in individual model tests
    def test_hidden_states_output(self):
        pass

    # input_embeds are tested in individual model tests
    def test_inputs_embeds(self):
        pass

    # tested in individual model tests
    def test_retain_grad_hidden_states_attentions(self):
        pass

    # FlavaModel does not have input/output embeddings
    def test_model_common_attributes(self):
        pass

    # override as the `logit_scale` parameter initialization is different for FLAVA
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale" or name == "flava.logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        configs_no_init.return_loss = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # FLAVA needs pixel_values

                if "input_ids_masked" in inputs_dict:
                    # For pretraining
                    inputs = (input_ids, inputs_dict["input_ids_masked"], pixel_values)
                else:
                    inputs = (input_ids, pixel_values)

                traced_model = torch.jit.trace(model, inputs)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

                model.to(torch_device)
                model.eval()

                loaded_model.to(torch_device)
                loaded_model.eval()

                model_state_dict = model.state_dict()
                loaded_model_state_dict = 
loaded_model.state_dict() loaded_model_state_dict.pop("text_model.embeddings.token_type_ids", None) non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_image_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) image_config = FlavaImageConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.image_config.to_dict(), image_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = FlavaTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) multimodal_config = FlavaMultimodalConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.multimodal_config.to_dict(), multimodal_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlavaModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaForPreTrainingTester(FlavaModelTester): model_class = FlavaForPreTraining def prepare_config_and_inputs_for_common(self): _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() input_ids_masked = input_ids.detach().clone() input_ids_masked[:, 1:3] = 100 mlm_labels = input_ids.detach().clone() mlm_labels[:, :] = config.ce_ignore_index mlm_labels[:, 1:3] = input_ids[:, 1:3] mim_labels = torch.randint( 0, self.image_model_tester.vocab_size, bool_masked_pos.size(), device=bool_masked_pos.device ).long() mim_labels[bool_masked_pos.ne(True)] = config.ce_ignore_index itm_labels = torch.ones(mlm_labels.size(0), device=bool_masked_pos.device).long() return config, { "input_ids": input_ids, "input_ids_masked": input_ids_masked, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos, "mlm_labels": mlm_labels, "mim_labels": mim_labels, "itm_labels": itm_labels, "return_loss": True, } def _test_model(self, config, inputs, test_image=False, test_text=False): model = self.model_class(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=inputs["input_ids"] if test_text else None, input_ids_masked=inputs["input_ids_masked"] if test_text else None, attention_mask=inputs["attention_mask"] if test_text else None, token_type_ids=inputs["token_type_ids"] if test_text else None, pixel_values=inputs["pixel_values"] 
if test_image else None, bool_masked_pos=inputs["bool_masked_pos"] if test_image else None, mlm_labels=inputs["mlm_labels"], mim_labels=inputs["mim_labels"], itm_labels=inputs["itm_labels"], return_loss=inputs["return_loss"], ) image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size) patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if test_image: self.parent.assertEqual( result.image_embeddings.shape, (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size), ) if not test_text: self.parent.assertEqual( result.loss_info.mim.dim(), 0, ) self.parent.assertEqual( result.mim_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) else: self.parent.assertIsNone(result.image_embeddings) if test_text: self.parent.assertEqual( result.text_embeddings.shape, ( self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size, ), ) if not test_image: self.parent.assertEqual(result.loss_info.mlm.dim(), 0) self.parent.assertEqual( result.mlm_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) else: self.parent.assertIsNone(result.text_embeddings) if test_image and test_text: self.parent.assertEqual( result.multimodal_masked_embeddings.shape, ( self.multimodal_model_tester.batch_size, self.text_model_tester.seq_length + num_patches + 2, self.multimodal_model_tester.hidden_size, ), ) self.parent.assertEqual( result.itm_logits.shape, (self.text_model_tester.batch_size, 2), ) self.parent.assertEqual( result.mmm_text_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) self.parent.assertEqual( result.mmm_image_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) self.parent.assertEqual( result.contrastive_logits_per_image.shape, (self.image_model_tester.batch_size, self.text_model_tester.batch_size), ) self.parent.assertEqual( result.contrastive_logits_per_text.shape, (self.text_model_tester.batch_size, self.image_model_tester.batch_size), ) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertEqual(item.dim(), 0) for item in [result.loss_info.mim, result.loss_info.mlm]: self.parent.assertIsNone(item) else: self.parent.assertIsNone(result.multimodal_masked_embeddings) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertIsNone(item) self.parent.assertIsNone(result.multimodal_embeddings) @require_torch class FlavaForPreTrainingTest(FlavaModelTest): all_model_classes = (FlavaForPreTraining,) if is_torch_available() else () class_for_tester = FlavaForPreTrainingTester test_torchscript = False @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This 
architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class FlavaModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaModel.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs, return_dict=True) self.assertAlmostEqual(outputs.image_embeddings.sum().item(), -1352.53540, places=4) self.assertAlmostEqual(outputs.text_embeddings.sum().item(), -198.98225, places=4) self.assertAlmostEqual(outputs.multimodal_embeddings.sum().item(), -3988.51367, places=4) @require_vision @require_torch class FlavaForPreTrainingIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaForPreTraining.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) torch.manual_seed(1) random.seed(1) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True, ) inputs["input_ids_masked"] = inputs["input_ids"].clone() inputs["input_ids_masked"][0, 4:6] = 103 inputs["mlm_labels"] = inputs["input_ids"].clone() inputs["mlm_labels"][:, :] = -100 inputs["mlm_labels"][0, 4:6] = inputs["input_ids"][0, 4:6] inputs = inputs.to(torch_device) with torch.no_grad(): outputs = model(**inputs) self.assertEqual( outputs.contrastive_logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.contrastive_logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device) self.assertTrue(torch.allclose(outputs.contrastive_logits_per_image, expected_logits, atol=1e-3)) self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 1.75533199, places=4) self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 7.0290069, places=4) self.assertAlmostEqual(outputs.loss.item(), 11.0626, places=4)
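# Usage sketch (not part of the test suite): FlavaForPreTrainingIntegrationTest above asserts the
# shape and values of `contrastive_logits_per_image`; the snippet below shows how those logits can
# be turned into per-image text-match probabilities. The checkpoint, processor, and input
# construction are taken from the test; running without masked inputs or labels is an assumption
# (the model is expected to fall back to plain inference in that case).
import requests
import torch
from PIL import Image

from transformers import FlavaForPreTraining, FlavaProcessor

model = FlavaForPreTraining.from_pretrained("facebook/flava-full").eval()
processor = FlavaProcessor.from_pretrained("facebook/flava-full")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"],
    images=[image, image],
    padding="max_length",
    max_length=77,
    return_tensors="pt",
    return_codebook_pixels=True,
    return_image_mask=True,
)
with torch.no_grad():
    outputs = model(**inputs)

# Rows index images, columns index texts; softmax over texts gives match probabilities per image.
probs = outputs.contrastive_logits_per_image.softmax(dim=-1)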
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch FNet model. """

# The expected values in the integration tests below were verified against the original Flax
# checkpoint. For comparison:
# 1. Modify the pre-training model `__call__` to skip computing metrics and return
#    masked_lm_output like so:
#
#        sequence_output, pooled_output = EncoderModel(
#            self.config, random_seed=self.random_seed, name="encoder")(
#                input_ids, input_mask, type_ids, deterministic=deterministic)
#
#        masked_lm_output = nn.Dense(
#            self.config.d_emb,
#            kernel_init=default_kernel_init,
#            name="predictions_dense")(sequence_output)
#        masked_lm_output = nn.gelu(masked_lm_output)
#        masked_lm_output = nn.LayerNorm(
#            epsilon=LAYER_NORM_EPSILON, name="predictions_layer_norm")(masked_lm_output)
#        masked_lm_logits = layers.OutputProjection(
#            kernel=self._get_embedding_table(), name="predictions_output")(masked_lm_output)
#
#        next_sentence_logits = layers.OutputProjection(
#            n_out=2, kernel_init=default_kernel_init, name="classification")(pooled_output)
#
#        return masked_lm_logits
#
# 2. Run the following:
#
#        import jax.numpy as jnp
#        import sentencepiece as spm
#        from flax.training import checkpoints
#        from f_net.models import PreTrainingModel
#        from f_net.configs.pretraining import get_config, ModelArchitecture
#
#        pretrained_params = checkpoints.restore_checkpoint("./f_net/f_net_checkpoint", None)  # Location of original checkpoint
#        pretrained_config = get_config()
#        pretrained_config.model_arch = ModelArchitecture.F_NET
#
#        vocab_filepath = "./f_net/c4_bpe_sentencepiece.model"  # Location of the sentence piece model
#        tokenizer = spm.SentencePieceProcessor()
#        tokenizer.Load(vocab_filepath)
#        with pretrained_config.unlocked():
#            pretrained_config.vocab_size = tokenizer.GetPieceSize()
#        tokens = jnp.array([[0, 1, 2, 3, 4, 5]])
#        type_ids = jnp.zeros_like(tokens, dtype="i4")
#        attention_mask = jnp.ones_like(tokens)  # Dummy. This gets deleted inside the model.
#
#        flax_pretraining_model = PreTrainingModel(pretrained_config)
#        pretrained_model_params = freeze(pretrained_params["target"])
#        flax_model_outputs = flax_pretraining_model.apply(
#            {"params": pretrained_model_params}, tokens, attention_mask, type_ids,
#            None, None, None, None, deterministic=True)
#        masked_lm_logits[:, :3, :3]
import unittest from typing import Dict, List, Tuple from transformers import FNetConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetModel, FNetTokenizerFast, ) from transformers.models.fnet.modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetBasicFourierTransform, is_scipy_available, ) class FNetConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) if self.has_text_modality: self.parent.assertTrue(hasattr(config, "vocab_size")) self.parent.assertTrue(hasattr(config, "hidden_size")) self.parent.assertTrue(hasattr(config, "num_hidden_layers")) class FNetModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels def get_config(self): return FNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, tpu_short_seq_length=self.seq_length, ) @require_torch def create_and_check_fourier_transform(self, config): hidden_states = floats_tensor([self.batch_size, self.seq_length, config.hidden_size]) 
transform = FNetBasicFourierTransform(config) fftn_output = transform(hidden_states) config.use_tpu_fourier_optimizations = True if is_scipy_available(): transform = FNetBasicFourierTransform(config) dft_output = transform(hidden_states) config.max_position_embeddings = 4097 transform = FNetBasicFourierTransform(config) fft_output = transform(hidden_states) if is_scipy_available(): self.parent.assertTrue(torch.allclose(fftn_output[0][0], dft_output[0][0], atol=1e-4)) self.parent.assertTrue(torch.allclose(fft_output[0][0], dft_output[0][0], atol=1e-4)) self.parent.assertTrue(torch.allclose(fftn_output[0][0], fft_output[0][0], atol=1e-4)) def create_and_check_model(self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels): model = FNetModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): model = FNetForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): model = FNetForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sentence_prediction( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): model = FNetForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, token_type_ids=token_type_ids, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): model = FNetForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = FNetForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = FNetForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, 
labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = FNetForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids} return config, inputs_dict @require_torch class FNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FNetModel, FNetForPreTraining, FNetForMaskedLM, FNetForNextSentencePrediction, FNetForMultipleChoice, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FNetModel, "fill-mask": FNetForMaskedLM, "question-answering": FNetForQuestionAnswering, "text-classification": FNetForSequenceClassification, "token-classification": FNetForTokenClassification, "zero-shot": FNetForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_head_masking = False test_pruning = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_attention_outputs(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): 
with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) def setUp(self): self.model_tester = FNetModelTester(self) self.config_tester = FNetConfigTester(self, config_class=FNetConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in FNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FNetModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class FNetModelIntegrationTest(unittest.TestCase): @slow def test_inference_for_masked_lm(self): model = FNetForMaskedLM.from_pretrained("google/fnet-base") model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) with torch.no_grad(): output = model(input_ids)[0] vocab_size = 32000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-1.7819, -7.7384, -7.5002], [-3.4746, -8.5943, -7.7762], [-3.2052, -9.0771, -8.3468]]], device=torch_device, ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow @require_tokenizers def test_inference_long_sentence(self): tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base") inputs = tokenizer( "the man worked as a [MASK].", "this is his [MASK].", return_tensors="pt", padding="max_length", max_length=512, ) torch.testing.assert_allclose(inputs["input_ids"], torch.tensor([[4, 13, 283, 2479, 106, 8, 6, 845, 5, 168, 65, 367, 6, 845, 5, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3, 3, 3, 3, 3, 3, 3, 3, 3, 3,3]])) inputs = {k: v.to(torch_device) for k, v in inputs.items()} model = FNetForMaskedLM.from_pretrained("google/fnet-base") model.to(torch_device) logits = model(**inputs).logits predictions_mask_1 = tokenizer.decode(logits[0, 6].topk(5).indices) predictions_mask_2 = tokenizer.decode(logits[0, 12].topk(5).indices) self.assertEqual(predictions_mask_1.split(" "), ["man", "child", "teacher", "woman", "model"]) self.assertEqual(predictions_mask_2.split(" "), ["work", "wife", "job", "story", "name"]) @slow def test_inference_for_next_sentence_prediction(self): model = 
FNetForNextSentencePrediction.from_pretrained("google/fnet-base") model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 2)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[-0.2234, -0.0226]], device=torch_device) self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4)) @slow def test_inference_model(self): model = FNetModel.from_pretrained("google/fnet-base") model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, model.config.hidden_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[4.1541, -0.1051, -0.1667], [-0.9144, 0.2939, -0.0086], [-0.8472, -0.7281, 0.0256]]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
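# Usage sketch (not part of the test suite): masked-token prediction with the same checkpoint and
# decode pattern exercised by FNetModelIntegrationTest.test_inference_long_sentence above. All
# class and checkpoint names come from the tests; only the mask-index lookup is added here.
import torch

from transformers import FNetForMaskedLM, FNetTokenizerFast

tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
model = FNetForMaskedLM.from_pretrained("google/fnet-base").eval()

inputs = tokenizer("the man worked as a [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Locate the [MASK] position and decode the five most likely fillers, as the test does.
mask_positions = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
top_tokens = logits[0, mask_positions].topk(5).indices
print(tokenizer.decode(top_tokens[0]))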
# coding=utf-8
# Copyright 2019 Hugging Face inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import FNetTokenizer, FNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, tooslow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FNetTokenizer
    rust_tokenizer_class = FNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = FNetTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = FNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁", "T", "his", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [13, 1, 4398, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = FNetTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    # Overridden Tests - loading the fast tokenizer from slow just takes too long
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")

                    cr_output = tokenizer_r.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)

    @tooslow
    def test_special_tokens_initialization_from_slow(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
                )
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                tokenizer_p = self.tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )

                p_output = tokenizer_p.encode("Hey this is a <special> token")
                cr_output = tokenizer_r.encode("Hey this is a <special> token")

                self.assertEqual(p_output, cr_output)
                self.assertTrue(special_token_id in p_output)
                self.assertTrue(special_token_id in cr_output)

    # Overridden Tests
    def test_padding(self, max_length=50):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
                pad_token_id = tokenizer_p.pad_token_id

                # Encode - Simple input
                input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.encode("This is a simple input", padding="longest")
                input_p = tokenizer_p.encode("This is a simple input", padding=True)
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode - Pair input
                input_r = tokenizer_r.encode(
                    "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode(
                    "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(
                    "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
                )
                input_p = tokenizer_p.encode(
                    "This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
                input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode_plus - Simple input
                input_r = tokenizer_r.encode_plus(
                    "This is a simple input", max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode_plus(
                    "This is a simple input", max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                input_r = tokenizer_r.encode_plus(
                    "This is a simple input", max_length=max_length, padding="max_length"
                )
                input_p = tokenizer_p.encode_plus(
                    "This is a simple input", max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)

                input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
                input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )

                # Encode_plus - Pair input
                input_r = tokenizer_r.encode_plus(
                    "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode_plus(
                    "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
                )
encodeplus this is a simple input this is a pair maxlengthmaxlength padtomaxlengthtrue self assertpaddedinputmatchinputrinputids inputpinputids maxlength padtokenid inputr tokenizerr encodeplus this is a simple input this is a pair maxlengthmaxlength paddingmaxlength inputp tokenizerp encodeplus this is a simple input this is a pair maxlengthmaxlength paddingmaxlength self assertpaddedinputmatchinputrinputids inputpinputids maxlength padtokenid inputr tokenizerr encodeplusthis is a simple input this is a pair paddinglongest inputp tokenizerp encodeplusthis is a simple input this is a pair paddingtrue self assertpaddedinputmatch inputrinputids inputpinputids leninputrinputids padtokenid batchencodeplus simple input inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength padtomaxlengthtrue inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength padtomaxlengthtrue self assertbatchpaddedinputmatchinputr inputp maxlength padtokenid inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength paddingmaxlength inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength paddingmaxlength self assertbatchpaddedinputmatchinputr inputp maxlength padtokenid inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength paddinglongest inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 maxlengthmaxlength paddingtrue self assertbatchpaddedinputmatchinputr inputp leninputrinputids0 padtokenid inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 paddinglongest inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 paddingtrue self assertbatchpaddedinputmatchinputr inputp leninputrinputids0 padtokenid batchencodeplus pair input inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 this is a simple pair 1 this is a simple pair 2 maxlengthmaxlength truncationtrue paddingmaxlength inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 this is a simple pair 1 this is a simple pair 2 maxlengthmaxlength truncationtrue paddingmaxlength self assertbatchpaddedinputmatchinputr inputp maxlength padtokenid inputr tokenizerr batchencodeplus this is a simple input 1 this is a simple input 2 this is a simple pair 1 this is a simple pair 2 paddingtrue inputp tokenizerp batchencodeplus this is a simple input 1 this is a simple input 2 this is a simple pair 1 this is a simple pair 2 paddinglongest self assertbatchpaddedinputmatchinputr inputp leninputrinputids0 padtokenid using pad on single examples after tokenization inputr tokenizerr encodeplusthis is a input 1 inputr tokenizerr padinputr inputp tokenizerr encodeplusthis is a input 1 inputp tokenizerr padinputp self assertpaddedinputmatch inputrinputids inputpinputids leninputrinputids padtokenid using pad on single examples after tokenization inputr tokenizerr encodeplusthis is a input 1 inputr tokenizerr padinputr maxlengthmaxlength paddingmaxlength inputp tokenizerr encodeplusthis is a input 1 inputp tokenizerr padinputp maxlengthmaxlength paddingmaxlength self assertpaddedinputmatchinputrinputids inputpinputids maxlength padtokenid using pad after tokenization inputr tokenizerr batchencodeplus this is a input 1 this is a much longer input whilch should be padded inputr tokenizerr padinputr inputp 
tokenizerr batchencodeplus this is a input 1 this is a much longer input whilch should be padded inputp tokenizerr padinputp self assertbatchpaddedinputmatchinputr inputp leninputrinputids0 padtokenid using pad after tokenization inputr tokenizerr batchencodeplus this is a input 1 this is a much longer input whilch should be padded inputr tokenizerr padinputr maxlengthmaxlength paddingmaxlength inputp tokenizerr batchencodeplus this is a input 1 this is a much longer input whilch should be padded inputp tokenizerr padinputp maxlengthmaxlength paddingmaxlength self assertbatchpaddedinputmatchinputr inputp maxlength padtokenid slow def testsavepretrainedself super testsavepretrained slow def testsaveslowfromfastandreloadfastself super testsaveslowfromfastandreloadfast def assertbatchpaddedinputmatch self inputr dict inputp dict maxlength int padtokenid int modelmaininputname str inputids for ir in inputr values self assertequallenir 2 self assertequallenir0 maxlength self assertequallenir1 maxlength self assertequallenir 2 self assertequallenir0 maxlength self assertequallenir1 maxlength for ir ip in zipinputrmodelmaininputname inputpmodelmaininputname self assertpaddedinputmatchir ip maxlength padtokenid slow def testtokenizerintegrationself expectedencoding inputids 4 4616 107 163 328 14 63 1726 106 11954 16659 23 83 16688 11427 328 107 36 11954 16659 23 83 16688 6153 82 961 16688 3474 16710 1696 2306 16688 10854 2524 3827 561 163 3474 16680 62 226 2092 16680 379 3474 16660 16680 2436 16667 16671 16680 999 87 3474 16680 2436 16667 5208 800 16710 68 2018 2959 3037 163 16663 11617 16710 36 2018 2959 4737 163 16663 16667 16674 16710 91 372 5087 16745 2205 82 961 3608 38 1770 16745 7984 36 2565 751 9017 1204 864 218 1244 16680 11954 16659 23 83 36 14686 23 7619 16678 5 4 28 532 65 1929 33 391 16688 3979 9 2565 7849 299 225 34 2040 305 167 289 16667 16078 32 1966 181 4626 63 10575 71 851 1491 36 624 4757 38 208 8038 16678 5 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 13 1467 5187 26 2521 4567 16664 372 13 16209 3314 16678 5 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 tokentypeids 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip self tokenizerintegrationtestutil expectedencodingexpectedencoding modelnamegooglefnetbase revision34219a71ca20e280cc6000b89673a169c65d605c coding utf 8 2019 hugging face inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of 
any kind either express or implied see the license for the specific language governing permissions and limitations under the license we have a sentencepiece fixture for testing test _convert_token_to_id and _convert_id_to_token overriden tests loading the fast tokenizer from slow just takes too long overriden tests as we don t have a slow version we can t compare the outputs between slow and fast versions encode simple input encode pair input encode_plus simple input encode_plus pair input batch_encode_plus simple input batch_encode_plus pair input using pad on single examples after tokenization using pad on single examples after tokenization using pad after tokenization using pad after tokenization fmt skip
import unittest from transformers import FNetTokenizer, FNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow, tooslow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = FNetTokenizer rust_tokenizer_class = FNetTokenizerFast test_rust_tokenizer = True test_sentencepiece = True test_sentencepiece_ignore_case = True test_seq2seq = False def setUp(self): super().setUp() tokenizer = FNetTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_convert_token_and_id(self): token = "<pad>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<pad>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "▁eloquent") self.assertEqual(len(vocab_keys), 30_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 30_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_full_tokenizer(self): tokenizer = FNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁", "T", "his", "▁is", "▁a", "▁test"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [13, 1, 4398, 25, 21, 1289]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ "▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ], ) def test_sequence_builders(self): tokenizer = FNetTokenizer(SAMPLE_VOCAB) text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ] def test_special_tokens_initialization(self): for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_r.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @tooslow def test_special_tokens_initialization_from_slow(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_r.encode("Hey this is a <special> token") self.assertEqual(p_output, cr_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) def test_padding(self, max_length=50): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length") input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", padding="longest") input_p = tokenizer_p.encode("This is a simple input", padding=True) self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode( "This is a simple input", "This 
is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True) input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest") self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True, ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding="longest" 
) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding=True ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding=True, ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding="longest", ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.encode_plus("This is an input 1") input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.encode_plus("This is an input 1") input_p = tokenizer_r.pad(input_p) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) input_r = tokenizer_r.encode_plus("This is an input 1") input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.encode_plus("This is an input 1") input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is an input 1", "This is a much longer input which should be padded"] ) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.batch_encode_plus( ["This is an input 1", "This is a much longer input which should be padded"] ) input_p = tokenizer_r.pad(input_p) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is an input 1", "This is a much longer input which should be padded"] ) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.batch_encode_plus( ["This is an input 1", "This is a much longer input which should be padded"] ) input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) @slow def test_save_pretrained(self): super().test_save_pretrained() @slow def test_save_slow_from_fast_and_reload_fast(self): super().test_save_slow_from_fast_and_reload_fast() def assert_batch_padded_input_match( self, input_r: dict, input_p: dict, max_length: int, pad_token_id: int, model_main_input_name: str = "input_ids", ): for i_r in input_r.values(): self.assertEqual(len(i_r), 2) self.assertEqual(len(i_r[0]), max_length) self.assertEqual(len(i_r[1]), max_length) for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]): self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id) @slow def
test_tokenizer_integration(self): expected_encoding = {'input_ids': [[4, 4616, 107, 163, 328, 14, 63, 1726, 106, 11954, 16659, 23, 83, 16688, 11427, 328, 107, 36, 11954, 16659, 23, 83, 16688, 6153, 82, 961, 16688, 3474, 16710, 1696, 2306, 16688, 10854, 2524, 3827, 561, 163, 3474, 16680, 62, 226, 2092, 16680, 379, 3474, 16660, 16680, 2436, 16667, 16671, 16680, 999, 87, 3474, 16680, 2436, 16667, 5208, 800, 16710, 68, 2018, 2959, 3037, 163, 16663, 11617, 16710, 36, 2018, 2959, 4737, 163, 16663, 16667, 16674, 16710, 91, 372, 5087, 16745, 2205, 82, 961, 3608, 38, 1770, 16745, 7984, 36, 2565, 751, 9017, 1204, 864, 218, 1244, 16680, 11954, 16659, 23, 83, 36, 14686, 23, 7619, 16678, 5], [4, 28, 532, 65, 1929, 33, 391, 16688, 3979, 9, 2565, 7849, 299, 225, 34, 2040, 305, 167, 289, 16667, 16078, 32, 1966, 181, 4626, 63, 10575, 71, 851, 1491, 36, 624, 4757, 38, 208, 8038, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [4, 13, 1467, 5187, 26, 2521, 4567, 16664, 372, 13, 16209, 3314, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="google/fnet-base", revision="34219a71ca20e280cc6000b89673a169c65d605c", )
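The overridden test_padding above exercises both the legacy pad_to_max_length flag and the explicit padding="max_length" strategy, checking slow/fast tokenizer parity under each; the two spellings map to the same padding behaviour. A minimal standalone sketch of that equivalence follows. It assumes the google/fnet-base checkpoint (the one used by the integration test above) is reachable, and is illustrative rather than part of the test suite.

from transformers import FNetTokenizerFast

tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")

# The deprecated flag and the explicit strategy should produce identical ids,
# padded with tokenizer.pad_token_id up to max_length.
legacy = tokenizer.encode("This is a simple input", max_length=20, pad_to_max_length=True)
modern = tokenizer.encode("This is a simple input", max_length=20, padding="max_length")
assert legacy == modern
assert len(modern) == 20 and modern[-1] == tokenizer.pad_token_id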
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
# for the specific language governing permissions and limitations under the License.
#
# Testing suite for the PyTorch FocalNet model.
#
# Comments carried in the test code below: verify feature maps; verify channels; verify backbone works with
# out_features=None; test greyscale images; FocalNet has a different seq_length; check that output_hidden_states
# also works using config; TODO: update organization; forward pass; verify the logits.
import collections import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class FocalNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.hidden_sizes = hidden_sizes self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return FocalNetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = FocalNetModel(config=config) 
model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_backbone(self, config, pixel_values, labels): model = FocalNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8]) self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1]) config.out_features = None model = FocalNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4]) self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = FocalNetForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = FocalNetForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = FocalNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = FocalNetForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = FocalNetModelTester(self) self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False) def test_config(self): 
self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="FocalNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="FocalNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def 
test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FocalNetModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_vision @require_torch class FocalNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281) @require_torch class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (FocalNetBackbone,) if is_torch_available() else () config_class = FocalNetConfig has_attentions = False def setUp(self): self.model_tester = FocalNetModelTester(self)
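As a quick cross-check of the shape arithmetic that create_and_check_model asserts above, here is the same computation spelled out with the tester's default hyperparameters (plain Python, illustrative only):

image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

# The patch embedding yields (image_size // patch_size) ** 2 tokens; each of the
# len(depths) - 1 downsampling stages merges tokens 4-to-1 and doubles the channels,
# mirroring the expected_seq_len / expected_dim formulas in the tester.
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64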
Copyright 2020 HuggingFace. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Comments stripped from the FSMT modeling tests that follow: the tester hacks around attributes the common modeling tests expect but the model lacks, notably a single vocab_size, since FSMT keeps separate source and target vocabularies; test_model_common_attributes is overridden because the embedding type differs; the initialization test checks that parameters were drawn from N(0, config.init_std), with a different std for the fairseq version of SinusoidalPositionalEmbedding; FSMT ties three weights, which is harmless for torch.load but an issue for safetensors; the decoder-feature checks confirm that no hidden states or attentions are returned, that masked tokens change the output, and that long encoder attention masks behave the same as boolean ones; expected numbers were generated with the en-ru model from fairseq's model4.pt checkpoint and may need adjusting if a different checkpoint is used; odd embedding_dim and odd num_embeddings are allowed; the positional-embedding tests run against a verbatim copy of fairseq's SinusoidalPositionalEmbedding (only some rows match Marian's layout) and verify that the forward pass is a plain lookup with no ignore-padding logic; and in the prepared decoder inputs the final token is never attended to because it is padding.
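The decoder-input preparation exercised in the tests below builds an additive causal mask whose forbidden positions hold the dtype minimum, the `ignore` value checked in test_prepare_fsmt_decoder_inputs. A minimal sketch of such a mask, assuming the usual additive-before-softmax convention:

import torch

# Minimal sketch of an additive causal mask in the style checked by
# test_prepare_fsmt_decoder_inputs: 0.0 where attention is allowed, the
# dtype minimum (the test's `ignore`) at future positions.
seq_len = 3
ignore = torch.finfo(torch.float32).min
causal_mask = torch.triu(torch.full((seq_len, seq_len), ignore), diagonal=1)
# Rows index query positions, columns index key positions:
# [[0, ignore, ignore],
#  [0, 0,      ignore],
#  [0, 0,      0     ]]
expected = torch.tensor([[0.0, ignore, ignore], [0.0, 0.0, ignore], [0.0, 0.0, 0.0]])
assert torch.equal(causal_mask, expected)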
import tempfile import unittest import timeout_decorator from parameterized import parameterized from transformers import FSMTConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, shift_tokens_right, ) from transformers.pipelines import TranslationPipeline class FSMTModelTester: def __init__( self, parent, src_vocab_size=99, tgt_vocab_size=99, langs=["ru", "en"], batch_size=13, seq_length=7, is_training=False, use_labels=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, bos_token_id=0, pad_token_id=1, eos_token_id=2, ): self.parent = parent self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.langs = langs self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id torch.manual_seed(0) self.vocab_size = self.src_vocab_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp( 3, ) input_ids[:, -1] = 2 config = self.get_config() inputs_dict = prepare_fsmt_inputs_dict(config, input_ids) return config, inputs_dict def get_config(self): return FSMTConfig( vocab_size=self.src_vocab_size, src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"] inputs_dict["use_cache"] = False return config, inputs_dict def prepare_fsmt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if head_mask is 
None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (FSMTForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": FSMTForConditionalGeneration, "feature-extraction": FSMTModel, "summarization": FSMTForConditionalGeneration, "text2text-generation": FSMTForConditionalGeneration, "translation": FSMTForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = FSMTModelTester(self) self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } config["vocab_size"] = 99 self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) def test_config(self): self.config_tester.run_common_tests() def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding)) def test_initialization_more(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config) model.to(torch_device) model.eval() def _check_var(module): self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2) _check_var(model.encoder.embed_tokens) _check_var(model.encoder.layers[0].self_attn.k_proj) _check_var(model.encoder.layers[0].fc1) def test_advanced_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_cache = False inputs_dict["input_ids"][:, -2:] = config.pad_token_id decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, inputs_dict["input_ids"] ) model = FSMTModel(config).to(torch_device).eval() decoder_features_with_created_mask = model(**inputs_dict)[0] decoder_features_with_passed_mask = model( decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict )[0] _assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask) useless_mask = torch.zeros_like(decoder_attn_mask) decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0] self.assertTrue(isinstance(decoder_features, torch.Tensor)) self.assertEqual( decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size), ) if decoder_attn_mask.min().item() < -1e3: self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item()) decoder_features_with_long_encoder_mask = model( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long() )[0] 
_assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask) def test_save_load_missing_keys(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (inputs_dict["input_ids"], inputs_dict["attention_mask"]), f"{tmpdirname}/fsmt_test.onnx", export_params=True, opset_version=12, input_names=["input_ids", "attention_mask"], ) def test_ensure_weights_are_shared(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.tie_word_embeddings = True model = FSMTForConditionalGeneration(config) self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 1, ) config.tie_word_embeddings = False model = FSMTForConditionalGeneration(config) self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 2, ) @unittest.skip("can't be implemented for FSMT due to dual vocab.") def test_resize_tokens_embeddings(self): pass @unittest.skip("Passing inputs_embeds not implemented for FSMT.") def test_inputs_embeds(self): pass @unittest.skip("model weights aren't tied in FSMT.") def test_tie_model_weights(self): pass @unittest.skip("TODO: Decoder embeddings cannot be resized at the moment") def test_resize_embeddings_untied(self): pass @require_torch class FSMTHeadTests(unittest.TestCase): src_vocab_size = 99 tgt_vocab_size = 99 langs = ["ru", "en"] def _get_config(self): return FSMTConfig( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = self._get_config() return config, input_ids, batch_size def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], dtype=torch.long, device=torch_device) config = self._get_config() lm_model = FSMTForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 new_input_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length)) def 
test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = FSMTForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_prepare_fsmt_decoder_inputs(self): config, *_ = self._get_config_and_data() input_ids = _long_tensor(([4, 4, 2])) decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]]) causal_mask_dtype = torch.float32 ignore = torch.finfo(causal_mask_dtype).min decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids, causal_mask_dtype=causal_mask_dtype ) expected_causal_mask = torch.tensor( [[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] ).to(input_ids.device) self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size()) self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all()) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 pairs = [ ["en-ru"], ["ru-en"], ["en-de"], ["de-en"], ] @require_torch @require_sentencepiece @require_tokenizers class FSMTModelIntegrationTests(unittest.TestCase): tokenizers_cache = {} models_cache = {} default_mname = "facebook/wmt19-en-ru" @cached_property def default_tokenizer(self): return self.get_tokenizer(self.default_mname) @cached_property def default_model(self): return self.get_model(self.default_mname) def get_tokenizer(self, mname): if mname not in self.tokenizers_cache: self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname) return self.tokenizers_cache[mname] def get_model(self, mname): if mname not in self.models_cache: self.models_cache[mname] = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device) if torch_device == "cuda": self.models_cache[mname].half() return self.models_cache[mname] @slow def test_inference_no_head(self): tokenizer = self.default_tokenizer model = FSMTModel.from_pretrained(self.default_mname).to(torch_device) src_text = "My friend computer will translate this for me" input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"] input_ids = _long_tensor(input_ids).to(torch_device) inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]] ).to(torch_device) 
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def translation_setup(self, pair): text = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } src, tgt = pair.split("-") print(f"Testing {src} -> {tgt}") mname = f"facebook/wmt19-{pair}" src_text = text[src] tgt_text = text[tgt] tokenizer = self.get_tokenizer(mname) model = self.get_model(mname) return tokenizer, model, src_text, tgt_text @parameterized.expand(pairs) @slow def test_translation_direct(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device) outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n" @parameterized.expand(pairs) @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=torch_device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) @require_torch class TestSinusoidalPositionalEmbeddings(unittest.TestCase): padding_idx = 1 tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to( torch_device ) emb = emb1(input_ids) desired_weights = torch.tensor( [ [9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00], [1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00], ] ).to(torch_device) self.assertTrue( torch.allclose(emb[0], desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_odd_embed_dim(self): SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to(torch_device) SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device) @unittest.skip("different from marian (needs more research)") def test_positional_emb_weights_against_marian(self): desired_weights = torch.tensor( [ [0, 0, 0, 0, 0], [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374], [0.90929741, 0.93651021, 0.95829457, 0.97505713, 0.98720258], ] ) emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to( torch_device ) weights = emb1.weights.data[:3, :5] self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) input_ids = torch.tensor( [[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device ) no_cache_pad_zero = emb1(input_ids)[0] self.assertTrue( torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3) )
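The TestSinusoidalPositionalEmbeddings cases above compare against a verbatim copy of fairseq's implementation, whose column layout differs from the textbook formulation; that is why the skipped Marian comparison notes that only some rows match. For reference, a minimal sketch of the standard sinusoidal table (Vaswani et al., 2017), assuming an even embedding dimension; it is not fairseq's exact layout:

import math
import torch

# Minimal sketch of the textbook sinusoidal position table: sin at even
# columns, cos at odd columns. Fairseq's SinusoidalPositionalEmbedding,
# which the FSMT tests compare against verbatim, arranges the same values
# differently, so the two tables agree only up to a column permutation.
def sinusoidal_table(num_positions: int, dim: int) -> torch.Tensor:
    position = torch.arange(num_positions, dtype=torch.float).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, dim, 2, dtype=torch.float) * (-math.log(10000.0) / dim))
    table = torch.zeros(num_positions, dim)
    table[:, 0::2] = torch.sin(position * div_term)
    table[:, 1::2] = torch.cos(position * div_term)
    return table

print(sinusoidal_table(6, 6)[1])  # the embedding for position 1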
Copyright 2020 the HuggingFace team. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Comments stripped from the FSMT tokenization tests that follow: a different tiny model than the one behind the default init parameters is used to ensure proper testing; the vocabulary and merges are adapted from Sennrich et al. (2015) and https://github.com/rsennrich/subword-nmt; test_online_tokenizer_config only checks that the online tokenizer files are correctly fetched and loaded via tokenizer_config.json, and being fast it runs in normal CI; and if the encode/decode targets need to be recreated or extended, load fairseq's transformer.wmt19.en-ru via torch.hub.load with checkpoint_file model4.pt, tokenizer moses, and bpe fastbpe, print model.encode(src_text).tolist() for each source text, and decode backward with the reversed-language model.
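The tiny vocabulary and merges file written in setUp below drive a byte-pair-encoding tokenizer in the subword-nmt style. A minimal, self-contained sketch of how ranked merges greedily combine symbol pairs; this is illustrative only and ignores the end-of-word markers (</w>) the real tokenizer uses:

# Minimal byte-pair-encoding sketch in the subword-nmt style; illustrative
# only, not FSMT's actual implementation.
def bpe_segment(word, merge_ranks):
    symbols = list(word)
    while len(symbols) > 1:
        # Rank every adjacent pair; a lower rank means the merge was learned earlier.
        pairs = [(merge_ranks.get((a, b), float("inf")), i)
                 for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(pairs)
        if rank == float("inf"):
            break  # no learnable merges left
        symbols[i:i + 2] = ["".join(symbols[i:i + 2])]
    return symbols

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical ranks for illustration
assert bpe_segment("lower", ranks) == ["low", "e", "r"]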
import json import os import unittest from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer from transformers.testing_utils import slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin FSMT_TINY2 = "stas/tiny-wmt19-en-ru" class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = FSMTTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"]) self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"]) config_file = os.path.join(self.tmpdirname, "tokenizer_config.json") self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) with open(config_file, "w") as fp: fp.write(json.dumps(config)) @cached_property def tokenizer_ru_en(self): return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en") @cached_property def tokenizer_en_ru(self): return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru") def test_online_tokenizer_config(self): tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2) self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"]) self.assertEqual(tokenizer.src_vocab_size, 21) self.assertEqual(tokenizer.tgt_vocab_size, 21) def test_full_tokenizer(self): tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_ru_en text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == text + [2] assert encoded_pair == text + [2] + text_2 + [2] @slow def test_match_encode_decode(self): tokenizer_enc = self.tokenizer_en_ru tokenizer_dec = self.tokenizer_ru_en targets = [ [ "Here's a little song I wrote. Don't worry, be happy.", [2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2], ], ["This is it. No more. 
I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]], ] for src_text, tgt_input_ids in targets: encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None) self.assertListEqual(encoded_ids, tgt_input_ids) decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True) self.assertEqual(decoded_text, src_text) @slow def test_tokenizer_lower(self): tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True) tokens = tokenizer.tokenize("USA is United States of America") expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"] self.assertListEqual(tokens, expected) @unittest.skip("FSMTConfig.__init__ requires non-optional args") def test_torch_encode_plus_sent_to_model(self): pass @unittest.skip("FSMTConfig.__init__ requires non-optional args") def test_np_encode_plus_sent_to_model(self): pass
Copyright 2020 the HuggingFace Inc. team. Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Comments stripped from the Funnel Transformer tests that follow: the tester can also be imported elsewhere, e.g. from test_modeling_funnel import FunnelModelTester; initializer_std is set to a smaller value so the small error threshold (1e-5) can be kept in the test; num_attention_heads, hidden_size, and num_hidden_layers are derived attributes the common tests use to check the size of the first attention layer, the size of the first hidden state, and the number of output hidden states/attentions; FunnelModel adds two hidden layers, the input embeddings and the sum of the upsampled encoder hidden state with the last hidden state of the first block (which is the first hidden state of the decoder); _prepare_for_class special-cases the ForPreTraining model; and _mock_init_weights and test_training overwrite their test_modeling_common counterparts.
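The hidden-state bookkeeping described above reduces to simple arithmetic; a worked sketch with the tester's defaults (block_sizes=[1, 1, 2], num_decoder_layers=1):

# Worked example of the Funnel hidden-state bookkeeping with the
# tester's default hyperparameters.
block_sizes = [1, 1, 2]
num_decoder_layers = 1

base_num_hidden_layers = sum(block_sizes)                  # 4 for FunnelBaseModel
num_hidden_layers = sum(block_sizes) + num_decoder_layers  # 5 for the full model
# The full model reports two extra hidden states: the input embeddings, and
# the upsampled encoder state summed with the first block's last hidden state.
expected_num_hidden_layers = num_hidden_layers + 2         # 7

assert (base_num_hidden_layers, num_hidden_layers, expected_num_hidden_layers) == (4, 5, 7)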
import unittest from transformers import FunnelConfig, FunnelTokenizer, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) class FunnelModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std self.num_attention_heads = n_head self.hidden_size = self.d_model self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, 
max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelBaseModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForPreTraining(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = FunnelForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = 
token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FunnelModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( ( FunnelModel, FunnelForMaskedLM, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": (FunnelBaseModel, FunnelModel), "fill-mask": FunnelForMaskedLM, "question-answering": FunnelForQuestionAnswering, "text-classification": FunnelForSequenceClassification, "token-classification": FunnelForTokenClassification, "zero-shot": FunnelForSequenceClassification, } if is_torch_available() else {} ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch class FunnelBaseModelTest(ModelTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( (FunnelBaseModel, FunnelForMultipleChoice, FunnelForSequenceClassification) if is_torch_available() else () ) def setUp(self): self.model_tester = FunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_training(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ == "FunnelBaseModel": continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers class FunnelModelIntegrationTest(unittest.TestCase): def test_inference_tiny_model(self): batch_size = 13 sequence_length = 7 input_ids = torch.arange(0, batch_size * sequence_length).long().reshape(batch_size, sequence_length) lengths = [0, 1, 2, 3, 4, 5, 6, 4, 1, 3, 5, 0, 1] token_type_ids = torch.tensor([[2] + [0] * a + [1] * (sequence_length - a - 1) for a in lengths]) model = FunnelModel.from_pretrained("sgugger/funnel-random-tiny") output = model(input_ids, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2344.8352) expected_output_mean = torch.tensor(0.8052) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) attention_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3] * 6 + [[0, 1, 1, 0, 0, 1, 1]]) 
output = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2343.8425) expected_output_mean = torch.tensor(0.8049) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) @slow def test_inference_model(self): tokenizer = FunnelTokenizer.from_pretrained("huggingface/funnel-small") model = FunnelModel.from_pretrained("huggingface/funnel-small") inputs = tokenizer("Hello! I am the Funnel Transformer model.", return_tensors="pt") output = model(**inputs)[0] expected_output_sum = torch.tensor(235.7246) expected_output_mean = torch.tensor(0.0256) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
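# The tiny-model integration test above builds each token_type_ids row as
# [2] + [0] * a + [1] * (seq_len - a - 1): one type-2 token up front, then `a` type-0 tokens,
# then type-1 tokens for the rest. A worked example (illustration only) with seq_len=7, a=3:
row = [2] + [0] * 3 + [1] * (7 - 3 - 1)
assert row == [2, 0, 0, 0, 1, 1, 1]
assert len(row) == 7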
# coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Notes on the TFFunnelModelTester below:
# - You can also import this tester, e.g. `from .test_modeling_funnel import FunnelModelTester`.
# - `initializer_std=0.02` is set to a smaller value so we can keep the small error threshold (1e-5) in the test.
# - `num_attention_heads` is used in the tests to check the size of the first attention layer,
#   `hidden_size` the size of the first hidden state, and `num_hidden_layers` the number of
#   output hidden states / attentions.
# - FunnelModel adds two hidden layers: the input embeddings and the sum of the upsampled encoder
#   hidden state with the last hidden state of the first block (which is the first hidden state of the decoder).
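# A quick check of the layer bookkeeping described above (illustration only, using the tester
# defaults block_sizes=[1, 1, 2] and num_decoder_layers=1; for the base model the decoder
# layers are dropped):
block_sizes = [1, 1, 2]
num_decoder_layers = 1
num_hidden_layers = sum(block_sizes) + num_decoder_layers  # 5 for the full model, 4 for the base model
expected_num_hidden_layers = num_hidden_layers + 2  # + input embeddings + upsampled-sum hidden state
assert (num_hidden_layers, expected_num_hidden_layers) == (5, 7)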
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class TFFunnelModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std self.num_attention_heads = n_head self.hidden_size = self.d_model self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, 
): model = TFFunnelModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.truncate_seq = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.separate_cls = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelBaseModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) config.truncate_seq = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) config.separate_cls = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_choices = self.num_choices model = TFFunnelForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFFunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @require_tf class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFFunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): 
self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
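# Why the base-model shape checks above expect final sequence lengths of 2 and 3: each new block
# halves the sequence. A toy model of that arithmetic, using the tester defaults seq_length=7 and
# block_sizes=[1, 1, 2]. This sketch only reproduces the lengths asserted in
# create_and_check_base_model; the actual pooling lives in TFFunnelBaseModel.
import math

def pooled_length(length, num_blocks, separate_cls=True, truncate_seq=True):
    for _ in range(num_blocks - 1):
        if separate_cls:
            # the [CLS] token is kept out of the pooling; truncate_seq drops the trailing token
            length = 1 + math.ceil((length - 1 - (1 if truncate_seq else 0)) / 2)
        else:
            length = math.ceil(length / 2)
    return length

assert pooled_length(7, 3, separate_cls=True, truncate_seq=True) == 2
assert pooled_length(7, 3, separate_cls=True, truncate_seq=False) == 3
assert pooled_length(7, 3, separate_cls=False, truncate_seq=True) == 2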
# h: 450, w: 210; fitting to (160, 320) keeps the aspect ratio, so h -> 160 and w -> 210 * 160 / 450 ≈ 74.
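# Worked example of the resize above (a sketch; the actual logic lives in FuyuImageProcessor.resize):
# the height is the binding dimension here, so it is scaled to the target and the width follows.
height, width = 450, 210
target_h, target_w = 160, 320
new_h = target_h
new_w = int(width * target_h / height)  # 210 * 160 / 450 = 74.67 -> 74
assert (new_h, new_w) == (160, 74)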
import unittest import numpy as np from transformers import is_torch_available, is_vision_available from transformers.testing_utils import ( require_torch, require_torchvision, require_vision, ) if is_torch_available() and is_vision_available(): import torch from transformers import FuyuImageProcessor if is_vision_available(): from PIL import Image @require_torch @require_vision @require_torchvision class TestFuyuImageProcessor(unittest.TestCase): def setUp(self): self.size = {"height": 160, "width": 320} self.processor = FuyuImageProcessor(size=self.size, padding_value=1.0) self.batch_size = 3 self.channels = 3 self.height = 300 self.width = 300 self.image_input = torch.rand(self.batch_size, self.channels, self.height, self.width) self.image_patch_dim_h = 30 self.image_patch_dim_w = 30 self.sample_image = np.zeros((450, 210, 3), dtype=np.uint8) self.sample_image_pil = Image.fromarray(self.sample_image) def test_patches(self): expected_num_patches = self.processor.get_num_patches(image_height=self.height, image_width=self.width) patches_final = self.processor.patchify_image(image=self.image_input) assert ( patches_final.shape[1] == expected_num_patches ), f"Expected {expected_num_patches} patches, got {patches_final.shape[1]}." def test_scale_to_target_aspect_ratio(self): scaled_image = self.processor.resize(self.sample_image, size=self.size) self.assertEqual(scaled_image.shape[0], 160) self.assertEqual(scaled_image.shape[1], 74) def test_apply_transformation_numpy(self): transformed_image = self.processor.preprocess(self.sample_image).images[0][0] self.assertEqual(transformed_image.shape[1], 160) self.assertEqual(transformed_image.shape[2], 320) def test_apply_transformation_pil(self): transformed_image = self.processor.preprocess(self.sample_image_pil).images[0][0] self.assertEqual(transformed_image.shape[1], 160) self.assertEqual(transformed_image.shape[2], 320)
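# Patch-count arithmetic behind test_patches above (a sketch, assuming the image is tiled into
# non-overlapping image_patch_dim_h x image_patch_dim_w patches): a 300x300 input with 30x30
# patches yields (300 // 30) * (300 // 30) = 100 patches.
image_h = image_w = 300
patch_h = patch_w = 30
num_patches = (image_h // patch_h) * (image_w // patch_w)
assert num_patches == 100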
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Fuyu model. """
# In test_greedy_generation below, only the last 8 generated tokens are decoded, in order to skip
# the special "\n" and "\x04" characters.
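# A toy illustration of the past-key-values consistency check performed by
# create_and_check_decoder_model_past_large_inputs below (illustrative tensors only, not Fuyu):
# a cached run that decodes only the 3 new tokens must reproduce the hidden states that a full
# forward pass assigns to those positions, so the test compares a random hidden-dimension slice
# of the last 3 positions from both runs with torch.allclose.
import torch

output_from_no_past = torch.randn(2, 10, 8)                # full-sequence hidden states
output_from_past = output_from_no_past[:, -3:, :].clone()  # cached run returns only the new tokens
idx = torch.randint(0, output_from_past.shape[-1], (1,)).item()
assert torch.allclose(output_from_past[:, :, idx], output_from_no_past[:, -3:, idx], atol=1e-3)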
import io import unittest import requests from transformers import FuyuConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from transformers.utils import cached_property from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_vision_available(): from PIL import Image if is_torch_available() and is_vision_available(): from transformers import FuyuProcessor if is_torch_available(): import torch from transformers import FuyuForCausalLM class FuyuModelTester: def __init__( self, parent, batch_size=13, seq_length=7, image_size=30, patch_size=15, num_channels=3, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels def get_config(self): return FuyuConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, input_mask, sequence_labels, token_labels, ): model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, 
input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FuyuForCausalLM(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FuyuForCausalLM(config=config) model.to(torch_device) model.eval() outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FuyuForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": FuyuForCausalLM} if is_torch_available() else {} test_head_masking = False test_pruning = False test_cpu_offload = False test_disk_offload = False test_model_parallel = False def setUp(self): self.model_tester = FuyuModelTester(self) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip("Does not work on the tiny model.") def test_disk_offload_bin(self): super().test_disk_offload() @unittest.skip("Does not work on the tiny model.") def test_disk_offload_safetensors(self): super().test_disk_offload() @unittest.skip("Does not work on the tiny model.") def test_model_parallelism(self): super().test_model_parallelism() @slow @require_torch_gpu class FuyuModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return FuyuProcessor.from_pretrained("adept/fuyu-8b") @cached_property def default_model(self): return FuyuForCausalLM.from_pretrained("adept/fuyu-8b") def test_greedy_generation(self): processor = self.default_processor model = self.default_model url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" image = Image.open(io.BytesIO(requests.get(url).content)) text_prompt_coco_captioning = "Generate a coco-style caption.\n" inputs = processor(text=text_prompt_coco_captioning, images=image, return_tensors="pt") generated_ids = model.generate(**inputs, max_new_tokens=10) generated_text = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0] self.assertEqual(generated_text, "A blue bus parked on the side of a road.")
    def test_model_8b_chat_greedy_generation_bus_color(self):
        # Gated by the class-level @slow / @require_torch_gpu decorators above.
        processor = self.default_processor
        model = self.default_model
        url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
        bus_image_pil = Image.open(io.BytesIO(requests.get(url).content))

        EXPECTED_TEXT_COMPLETION = "The bus is blue.\n|ENDOFTEXT|"
        text_prompt_bus_color = "What color is the bus?\n"
        model_inputs_bus_color = processor(text=text_prompt_bus_color, images=bus_image_pil, return_tensors="pt")

        generated_tokens = model.generate(**model_inputs_bus_color, max_new_tokens=10)
        text = processor.tokenizer.batch_decode(generated_tokens)
        end_sequence = text[0].split("\x04")[1]
        clean_sequence = (
            end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")]
            if "|ENDOFTEXT|" in end_sequence
            else end_sequence
        )
        self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence)

    def test_model_8b_chat_greedy_generation_chart_vqa(self):
        processor = self.default_processor
        model = self.default_model

        EXPECTED_TEXT_TOKENS = ["The", "life expectancy", "at", "birth", "of male", "s in", "20", "18", "is", "80", ".", "7", ".", "\n", "|ENDOFTEXT|"]  # fmt: skip
        expected_text_completion = " ".join(EXPECTED_TEXT_TOKENS)  # TODO make sure the end string matches

        text_prompt_chart_vqa = "What is the highest life expectancy at birth of male?\n"
        chart_image_url = (
            "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/chart.png"
        )
        chart_image_pil = Image.open(io.BytesIO(requests.get(chart_image_url).content))

        model_inputs_chart_vqa = processor(text=text_prompt_chart_vqa, images=chart_image_pil, return_tensors="pt")
        generated_tokens = model.generate(**model_inputs_chart_vqa, max_new_tokens=10)
        text = processor.tokenizer.batch_decode(generated_tokens)
        end_sequence = text[0].split("\x04")[1]
        clean_sequence = (
            end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")]
            if "|ENDOFTEXT|" in end_sequence
            else end_sequence
        )
        self.assertEqual(expected_text_completion, clean_sequence)

    def test_model_8b_chat_greedy_generation_bounding_box(self):
        processor = self.default_processor
        model = self.default_model

        EXPECTED_TEXT_COMPLETION = "\x00194213202244\x01|ENDOFTEXT|"
        text_prompt_bbox = "When presented with a box, perform OCR to extract text contained within it. If provided with text, generate the corresponding bounding box.\\nWilliams"  # noqa: E231
        bbox_image_url = (
            "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bbox_sample_image.png"
        )
        bbox_image_pil = Image.open(io.BytesIO(requests.get(bbox_image_url).content))

        model_inputs_bbox = processor(text=text_prompt_bbox, images=bbox_image_pil, return_tensors="pt")
        generated_tokens = model.generate(**model_inputs_bbox, max_new_tokens=10)
        text = processor.tokenizer.batch_decode(generated_tokens)
        end_sequence = text[0].split("\x04")[1]
        clean_sequence = (
            end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")]
            if "|ENDOFTEXT|" in end_sequence
            else end_sequence
        )
        self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence)


# TODO: which mixins do we add here?
# The Fuyu processing tests that follow check that:
# - standard processing on a gold example matches Adept's reference code (test_fuyu_processing),
# - the processor works with just text input and with just image input,
# - the processor works with multiple image inputs for a single text input: a batch of two equally
#   sized images is stacked, single images of different sizes are processed as expected, and a
#   batch of two differently sized images left-pads the smaller image's inputs,
# - full_unpacked_stream_to_tensor and construct_full_unpacked_stream pad and unpack token streams
#   (the input stream is padded to avoid inconsistencies; the current model release matches),
# - FuyuImageProcessor.preprocess_with_tokenizer_info handles fixed-size images, including a mix of
#   present and absent images (image heights/widths adjusted for a subsequence of 1).
import io import unittest import requests from transformers import AutoTokenizer, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_gpu, slow if is_vision_available(): from PIL import Image if is_vision_available() and is_torch_available(): from transformers import FuyuImageProcessor, FuyuProcessor if is_torch_available(): import torch from transformers.models.fuyu.processing_fuyu import construct_full_unpacked_stream, full_unpacked_stream_to_tensor @require_torch @require_torch_gpu @slow class FuyuProcessingTest(unittest.TestCase): def setUp(self): pretrained_model_name = "adept/fuyu-8b" self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name) self.image_processor = FuyuImageProcessor() self.processor = FuyuProcessor(image_processor=self.image_processor, tokenizer=self.tokenizer) self.text_prompt = "Generate a coco-style caption.\\n" bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" self.bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content)) def test_fuyu_processing(self): EXPECTED_IMAGE_PATCH_INPUTS = torch.Tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]]).to(torch.int64) EXPECTED_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 
71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122,]]).to(torch.int64) one_image_bus_model_inputs = self.processor(text=self.text_prompt, images=self.bus_image_pil) torch.testing.assert_close(one_image_bus_model_inputs["image_patches_indices"], EXPECTED_IMAGE_PATCH_INPUTS) torch.testing.assert_close(one_image_bus_model_inputs["input_ids"], EXPECTED_PADDED_UNPACKED_TOKEN_INPUTS) def test_fuyu_processing_no_image(self): processor_outputs = self.processor(text=self.text_prompt) tokenizer_outputs = self.tokenizer(self.text_prompt) self.assertEqual(processor_outputs["input_ids"], tokenizer_outputs["input_ids"]) def test_fuyu_processing_no_text(self): EXPECTED_IMAGE_PATCH_INPUTS = torch.Tensor([ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 
267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] ]).to(torch.int64) processor_outputs = self.processor(images=self.bus_image_pil) self.assertTrue((processor_outputs["image_patches_indices"] == EXPECTED_IMAGE_PATCH_INPUTS).all()) def test_fuyu_processing_multiple_image_sample(self): SINGLE_IMAGE_PATCH_INPUTS = torch.Tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, -1, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, -1, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, -1, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -1, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, -1, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, -1, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, -1, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, -1, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, -1, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]]).to(torch.int64) SINGLE_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 
71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122,]]).to(torch.int64) SINGLE_RESIZED_IMAGE_PATCH_INPUTS = torch.Tensor([[ 0, 1, 2, -1, 3, 4, 5, -1, 6, 7, 8, -1, 9, 10, 11, -1, 12, 13, 14, -1, 15, 16, 17, -1, 18, 19, 20, -1, 21, 22, 23, -1, 24, 25, 26, -1, 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]) SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS = torch.Tensor([[ 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019, 1, 128340, 71374, 71389, 120412, 71377, 71835, 71374, 73615, 71375, 71399, 71435, 71122]]) images = [self.bus_image_pil, self.bus_image_pil] processor_outputs = self.processor(text=[self.text_prompt, self.text_prompt], images=images) self.assertTrue( ( processor_outputs["image_patches_indices"] == torch.cat([SINGLE_IMAGE_PATCH_INPUTS, SINGLE_IMAGE_PATCH_INPUTS], dim=0) ).all() ) self.assertTrue( ( processor_outputs["input_ids"] == torch.cat([SINGLE_PADDED_UNPACKED_TOKEN_INPUTS, SINGLE_PADDED_UNPACKED_TOKEN_INPUTS], dim=0) ).all() ) images = [self.bus_image_pil] processor_outputs = self.processor(text=self.text_prompt, images=images) self.assertTrue((processor_outputs["image_patches_indices"] == SINGLE_IMAGE_PATCH_INPUTS).all()) self.assertTrue((processor_outputs["input_ids"] == SINGLE_PADDED_UNPACKED_TOKEN_INPUTS).all()) images = [self.bus_image_pil.resize((64, 300))] processor_outputs = self.processor(text=self.text_prompt, images=images) self.assertTrue((processor_outputs["image_patches_indices"] == SINGLE_RESIZED_IMAGE_PATCH_INPUTS).all()) self.assertTrue((processor_outputs["input_ids"] == SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS).all()) images = [self.bus_image_pil, self.bus_image_pil.resize((64, 300))] processor_outputs = self.processor(text=[self.text_prompt, self.text_prompt], images=images) padding_len_patch = SINGLE_IMAGE_PATCH_INPUTS.shape[1] - SINGLE_RESIZED_IMAGE_PATCH_INPUTS.shape[1] padded_single_resized_image_patch = torch.cat( [torch.ones([1, padding_len_patch]) * -1, SINGLE_RESIZED_IMAGE_PATCH_INPUTS], dim=1 ) expected_image_patch_inputs = torch.cat([SINGLE_IMAGE_PATCH_INPUTS, padded_single_resized_image_patch], dim=0) padding_len_token = ( SINGLE_PADDED_UNPACKED_TOKEN_INPUTS.shape[1] - SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS.shape[1] ) padded_single_resized_padded_unpacked_token_inputs = torch.cat( [torch.zeros([1, padding_len_token]), 
SINGLE_RESIZED_PADDED_UNPACKED_TOKEN_INPUTS], dim=1 ) expected_padded_unpacked_token_inputs = torch.cat( [SINGLE_PADDED_UNPACKED_TOKEN_INPUTS, padded_single_resized_padded_unpacked_token_inputs], dim=0 ) self.assertTrue((processor_outputs["image_patches_indices"] == expected_image_patch_inputs).all()) self.assertTrue((processor_outputs["input_ids"] == expected_padded_unpacked_token_inputs).all()) @require_torch class TestImageTextProcessingUtils(unittest.TestCase): def setUp(self): self.batch_size = 2 self.new_seq_len = 8 self.num_sub_sequences = 1 self.all_bi_tokens_to_place = [4, 6] self.full_unpacked_stream = [torch.tensor([1, 2, 3, 4]), torch.tensor([5, 6, 7, 8, 9, 10])] self.fill_value = 0 self.num_real_text_tokens = [[3, 2], [2, 4]] self.input_stream = torch.tensor([[[1, 2, 3], [4, 5, 0]], [[6, 7, 0], [8, 9, 10]]]) self.image_tokens = [ [torch.tensor([1, 2]), torch.tensor([3])], [torch.tensor([4, 5, 6]), torch.tensor([7, 8])], ] def test_full_unpacked_stream_to_tensor(self): result = full_unpacked_stream_to_tensor( self.all_bi_tokens_to_place, self.full_unpacked_stream, self.fill_value, self.batch_size, self.new_seq_len, offset=0, ) EXPECTED_TENSOR = torch.tensor([[1, 2, 3, 4, 0, 0, 0, 0], [5, 6, 7, 8, 9, 10, 0, 0]]) self.assertTrue(torch.equal(result, EXPECTED_TENSOR)) def test_construct_full_unpacked_stream(self): result = construct_full_unpacked_stream( self.num_real_text_tokens, self.input_stream, self.image_tokens, self.batch_size, self.num_sub_sequences ) EXPECTED_UNPACKED_STREAM = [torch.tensor([1, 2, 1, 2, 3]), torch.tensor([4, 5, 6, 6, 7])] for i in range(len(result)): self.assertTrue(torch.equal(result[i], EXPECTED_UNPACKED_STREAM[i])) @require_torch class TestProcessImagesForModelInput(unittest.TestCase): def setUp(self): self.image_input = torch.randn([1, 1, 3, 64, 64]) self.image_present = torch.tensor([[1]]) self.image_unpadded_h = torch.tensor([[45]]) self.image_unpadded_w = torch.tensor([[50]]) self.image_patch_dim_h = 16 self.image_patch_dim_w = 16 self.image_placeholder_id = 999 self.image_newline_id = 888 self.variable_sized = True self.image_processor = FuyuImageProcessor( patch_size={"height": self.image_patch_dim_h, "width": self.image_patch_dim_w} ) def test_process_images_for_model_input_fixed_sized(self): self.variable_sized = False result = self.image_processor.preprocess_with_tokenizer_info( image_input=self.image_input, image_present=self.image_present, image_unpadded_h=self.image_unpadded_h, image_unpadded_w=self.image_unpadded_w, image_placeholder_id=self.image_placeholder_id, image_newline_id=self.image_newline_id, variable_sized=self.variable_sized, ) self.assertEqual(result["images"][0][0].shape, torch.Size([3, 64, 64]))
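# --- Illustrative sketch (not part of the test suite) ---
# The multi-size batching test above relies on the processor left-padding the
# smaller image's rows; a minimal standalone sketch of that padding rule, using
# toy values instead of the real Fuyu token ids, looks like this:
import torch

long_row = torch.arange(6).unsqueeze(0)   # patch indices of the larger image, shape (1, 6)
short_row = torch.arange(3).unsqueeze(0)  # patch indices of the smaller image, shape (1, 3)

# Left-pad the shorter row with -1 (the "no patch" filler used in the expected
# tensors above) so both rows stack into a single (2, 6) batch.
pad = long_row.shape[1] - short_row.shape[1]
padded_short = torch.cat([torch.full((1, pad), -1), short_row], dim=1)
batch = torch.cat([long_row, padded_short], dim=0)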
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import GitConfig, GitProcessor, GitVisionConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, GitForCausalLM, GitModel, GitVisionModel from transformers.models.git.modeling_git import GIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class GitVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=16, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return GitVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = GitVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GitVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (GitVisionModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = GitVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=GitVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): 
self.config_tester.run_common_tests() @unittest.skip(reason="GIT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="GitVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="GitVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in GIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GitVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class GitModelTester: def __init__( self, parent, num_channels=3, image_size=32, patch_size=16, batch_size=13, text_seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.batch_size = batch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.seq_length = self.text_seq_length + int((self.image_size / self.patch_size) ** 2) + 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.text_seq_length],
self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, input_ids, input_mask, pixel_values def get_config(self): return GitConfig( vision_config={ "num_channels": self.num_channels, "image_size": self.image_size, "patch_size": self.patch_size, "hidden_size": self.hidden_size, "projection_dim": 32, "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, }, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def create_and_check_model(self, config, input_ids, input_mask, pixel_values): model = GitModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.vocab_size)) result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values, labels=input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertTrue(result.loss.item() > 0) def _test_beam_search_generate(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() generated_ids = model.generate( input_ids, attention_mask=input_mask, pixel_values=pixel_values, do_sample=False, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def _test_batched_generate_captioning(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() generated_ids = model.generate( input_ids=None, attention_mask=None, pixel_values=pixel_values, do_sample=False, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, pixel_values, ) = config_and_inputs
inputs_dict = { "input_ids": input_ids, "attention_mask": input_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else () all_generative_model_classes = (GitForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM} if is_torch_available() else {} ) fx_compatible = False test_torchscript = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_CAUSAL_LM_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = GitModelTester(self) self.config_tester = ConfigTester(self, config_class=GitConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_beam_search_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_beam_search_generate(*config_and_inputs) def test_batched_generate_captioning(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_batched_generate_captioning(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in GIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GitModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="GIT has pixel values as additional input") def test_beam_search_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_greedy_generate_dict_outputs_use_cache(self): pass @require_torch @require_vision @slow class GitModelIntegrationTest(unittest.TestCase): def test_forward_pass(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, text="hello world", return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 201, 30522)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.9514, -0.9512, -0.9507], [-0.5454, -0.5453, -0.5453], [-0.8862, -0.8857, -0.8848]], 
device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_image_captioning(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) outputs = model.generate( pixel_values=pixel_values, max_length=20, output_scores=True, return_dict_in_generate=True ) generated_caption = processor.batch_decode(outputs.sequences, skip_special_tokens=True)[0] expected_shape = torch.Size((1, 9)) self.assertEqual(outputs.sequences.shape, expected_shape) self.assertEqual(generated_caption, "two cats laying on a pink blanket") self.assertEqual(outputs.scores[-1].shape, torch.Size((1, model.config.vocab_size))) expected_slice = torch.tensor([[-0.8805, -0.8803, -0.8799]], device=torch_device) self.assertTrue(torch.allclose(outputs.scores[-1][0, :3], expected_slice, atol=1e-4)) def test_visual_question_answering(self): processor = GitProcessor.from_pretrained("microsoft/git-base-textvqa") model = GitForCausalLM.from_pretrained("microsoft/git-base-textvqa") model.to(torch_device) file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset") image = Image.open(file_path).convert("RGB") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) question = "what does the front of the bus say at the top?" input_ids = processor(text=question, add_special_tokens=False).input_ids input_ids = [processor.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0).to(torch_device) generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=20) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] expected_shape = torch.Size((1, 15)) self.assertEqual(generated_ids.shape, expected_shape) self.assertEqual(generated_caption, "what does the front of the bus say at the top? special") def test_batched_generation(self): processor = GitProcessor.from_pretrained("microsoft/git-base-coco") model = GitForCausalLM.from_pretrained("microsoft/git-base-coco") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=[image, image], return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) start_token_id = model.config.bos_token_id input_ids = torch.tensor([[start_token_id], [start_token_id]], device=torch_device) generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) generated_captions = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(generated_captions, ["two cats sleeping on a pink blanket next to remotes."] * 2)
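# --- Illustrative sketch (not part of the test suite) ---
# The integration tests above double as a usage recipe; a condensed sketch of the
# captioning path they exercise (checkpoint name and fixture path copied from the
# tests; the decoded string is not asserted here):
import torch
from PIL import Image
from transformers import GitForCausalLM, GitProcessor

processor = GitProcessor.from_pretrained("microsoft/git-base")
model = GitForCausalLM.from_pretrained("microsoft/git-base")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# Unconditional captioning: no input_ids are passed, so generation starts from BOS.
with torch.no_grad():
    generated_ids = model.generate(pixel_values=pixel_values, max_length=20)
caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]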
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, CLIPImageProcessor, GitProcessor, PreTrainedTokenizerFast @require_vision class GitProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor() tokenizer = BertTokenizer.from_pretrained( "hf-internal-testing/tiny-random-BertModel", model_input_names=["input_ids", "attention_mask"] ) processor = GitProcessor(image_processor, tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = GitProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = GitProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = 
GitProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
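# --- Illustrative sketch (not part of the test suite) ---
# The contract pinned down by test_processor/test_model_input_names can be
# exercised directly; the component names below are the same ones used in setUp:
from transformers import BertTokenizer, CLIPImageProcessor, GitProcessor

tokenizer = BertTokenizer.from_pretrained(
    "hf-internal-testing/tiny-random-BertModel",
    model_input_names=["input_ids", "attention_mask"],
)
processor = GitProcessor(image_processor=CLIPImageProcessor(), tokenizer=tokenizer)

# Text-only calls return tokenizer outputs; text+image calls add pixel_values;
# calling with neither text nor images raises a ValueError.
text_only = processor(text="lower newer")
print(list(text_only.keys()))  # expected: ["input_ids", "attention_mask"]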
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size_divisor = size_divisor self.do_rescale = do_rescale def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } def expected_output_image_shape(self, images): if isinstance(images[0], Image.Image): width, height = images[0].size else: height, width = images[0].shape[1], images[0].shape[2] height = height // self.size_divisor * self.size_divisor width = width // self.size_divisor * self.size_divisor return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, size_divisor=self.size_divisor, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class GLPNImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = GLPNImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = GLPNImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "resample")) self.assertTrue(hasattr(image_processing, "do_rescale")) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_pytorch(self): image_processing = 
self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy_4_channels(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) self.image_processor_tester.num_channels = 3
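# --- Illustrative sketch (not part of the test suite) ---
# expected_output_image_shape above encodes GLPN's floor-to-multiple resizing;
# the arithmetic can be checked in isolation with the tester's own size_divisor:
size_divisor = 32
height, width = 45, 50
new_height = height // size_divisor * size_divisor  # -> 32
new_width = width // size_divisor * size_divisor    # -> 32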
import unittest from transformers import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MODEL_MAPPING, GLPNConfig, GLPNForDepthEstimation, GLPNModel from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class GLPNModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, decoder_hidden_size=16, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.decoder_hidden_size = decoder_hidden_size self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return GLPNConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, decoder_hidden_size=self.decoder_hidden_size, ) def create_and_check_model(self, config, pixel_values, labels): model = GLPNModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = GLPNForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) 
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else () pipeline_model_mapping = ( {"depth-estimation": GLPNForDepthEstimation, "feature-extraction": GLPNModel} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = GLPNModelTester(self) self.config_tester = GLPNConfigTester(self, config_class=GLPNConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @unittest.skip("GLPN does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("GLPN does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), 
expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue if model_class.__name__ == "GLPNForDepthEstimation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GLPNModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class GLPNModelIntegrationTest(unittest.TestCase): @slow def test_inference_depth_estimation(self): image_processor = GLPNImageProcessor.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0]) model = GLPNForDepthEstimation.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size([1, 480, 640]) self.assertEqual(outputs.predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
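For reference, a minimal standalone sketch (not part of the test suite) of how the depth-estimation classes exercised above are typically used. The checkpoint name and image path are assumptions for illustration, not taken from the tests.

import torch
from PIL import Image

from transformers import GLPNForDepthEstimation, GLPNImageProcessor

checkpoint = "vinvino02/glpn-kitti"  # assumed GLPN checkpoint; any GLPN checkpoint should work
image_processor = GLPNImageProcessor.from_pretrained(checkpoint)
model = GLPNForDepthEstimation.from_pretrained(checkpoint)
model.eval()

image = Image.open("path/to/image.png")  # placeholder path
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # shape (batch, height, width)

# upsample the depth map back to the input image resolution for visualization
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
).squeeze()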
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile import unittest import numpy as np import transformers from transformers import GPT2Config, GPT2Tokenizer, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gpt2.modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model if is_torch_available(): import torch class FlaxGPT2ModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, input_ids, attention_mask = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_bool_attention_mask_in_generation(self, model_class_name, config, input_ids, attention_mask): model = model_class_name(config) output_int_att_mask = model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=3, ) output_bool_att_mask = model.generate( input_ids=input_ids, attention_mask=attention_mask.astype(bool), max_new_tokens=3, ) self.parent.assertTrue( (output_bool_att_mask.sequences == output_int_att_mask.sequences).all(), "Generated response differ between boolean and integer attention mask", ) @require_flax class FlaxGPT2ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPT2Model, FlaxGPT2LMHeadModel) if is_flax_available() else () all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPT2ModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) def test_bool_attention_mask_in_generation(self): for model_class_name in self.all_generative_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_bool_attention_mask_in_generation( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="</s>", padding_side="left") inputs = tokenizer(["Hello this 
is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxGPT2LMHeadModel.from_pretrained("gpt2") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = jax.jit(model.generate) output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of words. I'm going to start with the first one.\n", "Hey, I'm not sure if I'm going to be able to do", ] self.assertListEqual(output_string, expected_string) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 
pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("gpt2", from_pt=True) outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
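A hedged sketch of the cached-decoding pattern that check_use_cache_forward exercises above: pre-allocate the cache with init_cache, run the prompt once to fill it, then feed one token at a time with explicit position_ids. The prompt text and decoding budget are illustrative.

import jax.numpy as jnp

from transformers import FlaxGPT2LMHeadModel, GPT2Tokenizer

model = FlaxGPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

input_ids = jnp.array(tokenizer("Hello there", return_tensors="np").input_ids)
batch_size, prompt_len = input_ids.shape
max_length = 20  # illustrative decoding budget

# pre-allocate key/value caches for the full window; the model masks unwritten
# cache slots internally via its cache index, so an all-ones mask is fine here
past_key_values = model.init_cache(batch_size, max_length)
attention_mask = jnp.ones((batch_size, max_length), dtype="i4")

# prefill: one pass over the whole prompt populates the cache
position_ids = jnp.broadcast_to(jnp.arange(prompt_len)[None, :], (batch_size, prompt_len))
outputs = model(
    input_ids, attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids
)

# one decoding step: only the newest token is fed, the rest comes from the cache
next_token = outputs.logits[:, -1].argmax(-1)[:, None]
position_ids = jnp.full((batch_size, 1), prompt_len, dtype="i4")
outputs = model(
    next_token, attention_mask=attention_mask, past_key_values=outputs.past_key_values, position_ids=position_ids
)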
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime import gc import math import unittest from transformers import GPT2Config, is_torch_available from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model, GPT2Tokenizer, ) class GPT2ModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return GPT2Config.from_pretrained("gpt2") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, 
token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2Model(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt2_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPT2Model(config=config) 
model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt2_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPT2Model(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2LMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPT2LMHeadModel(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, 
self.vocab_size)) result.loss.backward() def create_and_check_double_lm_head_model( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = GPT2DoubleHeadsModel(config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, "labels": multiple_choice_inputs_ids, } result = model(**inputs) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_gpt2_for_question_answering( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_gpt2_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt2_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_gpt2_weight_initialization(self, config, *args): model = GPT2Model(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class GPT2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, 
) if is_torch_available() else () ) all_generative_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPT2Model, "question-answering": GPT2ForQuestionAnswering, "text-classification": GPT2ForSequenceClassification, "text-generation": GPT2LMHeadModel, "token-classification": GPT2ForTokenClassification, "zero-shot": GPT2ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else () fx_compatible = True test_missing_keys = False test_model_parallel = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "GPT2DoubleHeadsModel": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["input_ids"] = inputs_dict["labels"] inputs_dict["token_type_ids"] = inputs_dict["labels"] inputs_dict["mc_token_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, ) inputs_dict["mc_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = GPT2ModelTester(self) self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_config(self): self.config_tester.run_common_tests() def test_gpt2_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model(*config_and_inputs) def test_gpt2_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs) def test_gpt2_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs) def test_gpt2_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs) def test_gpt2_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gpt2_double_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) def test_gpt2_question_answering_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_question_answering(*config_and_inputs) def test_gpt2_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs) def test_gpt2_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_token_classification(*config_and_inputs) def test_gpt2_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_gpt2_scale_attn_by_inverse_layer_idx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(scale_attn_by_inverse_layer_idx=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt2_reorder_and_upcast_attn(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(reorder_and_upcast_attn=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt2_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_weight_initialization(*config_and_inputs) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_batch_generation(self): model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a mess. I'm not sure if he's going", "Today, I'm going to be doing a lot of research on this. 
I", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_batch_generation_2heads(self): model = GPT2DoubleHeadsModel.from_pretrained("gpt2") model.to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a mess. I'm not sure if he's going", "Today, I'm going to be doing a lot of research on this. 
I", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GPT2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class GPT2ModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def _test_lm_generate_gpt2_helper( self, gradient_checkpointing=False, reorder_and_upcast_attn=False, scale_attn_by_inverse_layer_idx=False, verify_outputs=True, ): model = GPT2LMHeadModel.from_pretrained( "gpt2", reorder_and_upcast_attn=reorder_and_upcast_attn, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, ) if gradient_checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) expected_output_ids = [464, 3290, 373, 1043, 287, 257, 2214, 1474, 262, 16246, 286, 2688, 290, 2688, 27262, 13, 198, 198, 464, 3290,] output_ids = model.generate(input_ids, do_sample=False) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @slow def test_lm_generate_gpt2(self): self._test_lm_generate_gpt2_helper() @slow def test_lm_generate_gpt2_with_gradient_checkpointing(self): self._test_lm_generate_gpt2_helper(gradient_checkpointing=True) @slow def test_lm_generate_gpt2_with_reorder_and_upcast_attn(self): self._test_lm_generate_gpt2_helper(reorder_and_upcast_attn=True) @slow def test_lm_generate_gpt2_with_scale_attn_by_inverse_layer_idx(self): self._test_lm_generate_gpt2_helper(scale_attn_by_inverse_layer_idx=True, verify_outputs=False) @slow def test_gpt2_sample(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) output_ids = model.generate(input_ids, do_sample=True) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) token_type_ids = tokenized.token_type_ids.to(torch_device) output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5) output_seq_tt = model.generate( input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5 ) output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True) output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "Today is a nice day and if you don't know anything about the state of play during your holiday" ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR) self.assertTrue( all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs))) ) @slow def test_gpt2_sample_max_time(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) MAX_TIME = 0.5 start = datetime.datetime.now() model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - 
start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) @slow def test_contrastive_search_gpt2(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2-large").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = gpt2_model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], )
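A minimal sketch of the left-padded batch-generation recipe the batch tests above rely on: GPT-2 ships without a pad token, so the EOS token is reused, and padding must go on the left so the generated continuation stays adjacent to each prompt. The prompts are illustrative.

import torch

from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"  # decoder-only models must be padded on the left for generation
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no dedicated pad token

model = GPT2LMHeadModel.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id

prompts = ["Hello, my dog is a little", "Today, I"]  # illustrative prompts
inputs = tokenizer(prompts, return_tensors="pt", padding=True)

with torch.no_grad():
    output_ids = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))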
from __future__ import annotations import unittest from transformers import GPT2Config, is_tf_available from transformers.testing_utils import require_tf, require_tf2onnx, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPT2Tokenizer from transformers.models.gpt2.modeling_tf_gpt2 import ( TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, TFGPT2Model, ) from transformers.tf_utils import shape_list class TFGPT2ModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.bos_token_id = self.vocab_size - 1 self.eos_token_id = self.vocab_size - 1 self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2Model(config=config) inputs = { 
"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs = [input_ids, None, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2Model(config=config) outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_gpt2_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPT2Model(config=config) half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12) def create_and_check_gpt2_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPT2Model(config=config) input_ids = 
input_ids[:1, :] input_mask = input_mask[:1, :] token_type_ids = token_type_ids[:1, :] self.batch_size = 1 outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past_key_values, )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_gpt2_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2LMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_gpt2_double_head( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = TFGPT2DoubleHeadsModel(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_gpt2_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFGPT2ForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFGPT2ModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): 
all_model_classes = ( (TFGPT2Model, TFGPT2LMHeadModel, TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel) if is_tf_available() else () ) all_generative_model_classes = (TFGPT2LMHeadModel,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFGPT2Model, "text-classification": TFGPT2ForSequenceClassification, "text-generation": TFGPT2LMHeadModel, "zero-shot": TFGPT2ForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = True onnx_min_opset = 10 def setUp(self): self.model_tester = TFGPT2ModelTester(self) self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gpt2_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model(*config_and_inputs) def test_gpt2_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs) def test_gpt2_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs) def test_gpt2_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs) def test_gpt2_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_lm_head(*config_and_inputs) def test_gpt2_double_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_double_head(*config_and_inputs) def test_gpt2_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFGPT2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf2onnx @slow def test_onnx_runtime_optimize(self): if not self.test_onnx: return import onnxruntime import tf2onnx config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class in [TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel]: continue model = model_class(config) model.build() onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset) onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass @require_tf class TFGPT2ModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_greedy_distilgpt2_batch_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "repetition_penalty": 1.3, } output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) 
expected_output_string = [ "Today is a beautiful day and I am so happy to be able take part in this amazing event.", "Yesterday was a very interesting time for the world to see how much of this is", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_sample_distilgpt2_batch_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "do_sample": True, "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "repetition_penalty": 1.3, "temperature": 1.5, "top_k": 500, "top_p": 0.9, "seed": [42, 0], } with tf.device(":/CPU:0"): output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = [ "Today is a beautiful day and we will make you feel very hot/terrific in all your", "Yesterday was known by national television networks as Le Big Show or Wild Dog Jeopard", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_greedy_distilgpt2_beam_search_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "num_beams": 2, } output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = [ "Today is a beautiful day and a great day for all of us.\n\nI’m", "Yesterday was the first time that a person has been arrested in the United States for", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_distilgpt2_left_padding(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "repetition_penalty": 1.3, } expected_output_string = ( "Today is a beautiful day and I am so happy to be able take part in this amazing event." 
) sentences = ["Today is a beautiful day and"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(output_strings[0], expected_output_string) sentences = ["Today is a beautiful day and", "This is a very long input that we absolutely don't care about"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, **generation_kwargs, max_length=27) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(output_strings[0], expected_output_string) @slow def test_lm_generate_gpt2_greedy_xla(self): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["The dog", "The flying machine"] expected_output_strings = [ "The dog was found in a field near the intersection of West and West Streets.\n\nThe", "The flying machine is a small, lightweight, and lightweight aircraft that can be used for any type of", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=False) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=False) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) @slow def test_lm_generate_gpt2_sample_xla(self): with tf.device(":/CPU:0"): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentence = ["The dog", "The flying machine"] expected_output_string = [ "The dog owner asked why did our vet decide there needed to be extra ventilation inside because most" " puppies", "The flying machine was made by an artist who found it difficult to control it as it did not use", ] expected_output_string_xla = [ "The dog has been named in connection with the murder of a 20-year-old man in", "The flying machine is a new and improved system to operate and operate a new system and system " "system system", ] input_ids = tokenizer(sentence, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=True, seed=[7, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_string) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=True, seed=[7, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_string_xla) @slow def test_lm_generate_gpt2_beam_search_xla(self): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["The dog", "The flying machine"] expected_output_strings = [ "The dog was found in the backyard of a home in the 6500 block of South Main Street", "The flying machine is a very powerful machine, but it's not a very powerful machine. 
It's", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=False, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=False, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) @slow def test_contrastive_search_gpt2(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = TFGPT2LMHeadModel.from_pretrained("gpt2-large") input_ids = gpt2_tokenizer(article, return_tensors="tf") outputs = gpt2_model.generate(**input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], ) @slow def test_contrastive_search_gpt2_xla(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = TFGPT2LMHeadModel.from_pretrained("gpt2-large") input_ids = gpt2_tokenizer(article, return_tensors="tf") xla_generate = tf.function(gpt2_model.generate, jit_compile=True) outputs = xla_generate(**input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. 
The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], )
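# A minimal sketch of the XLA pattern used by the *_xla tests above: wrapping
# `generate` in tf.function(jit_compile=True) compiles the whole decoding loop.
# The checkpoint and sentences mirror the tests. XLA retraces whenever input shapes
# change, so the batch is padded to a common shape first, and decoder-only models
# are left-padded so newly generated tokens directly follow the prompt.
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"
model = TFGPT2LMHeadModel.from_pretrained("gpt2")

inputs = tokenizer(["The dog", "The flying machine"], return_tensors="tf", padding=True)
xla_generate = tf.function(model.generate, jit_compile=True)
output_ids = xla_generate(**inputs, do_sample=False)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))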
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpt2 import TFGPT2Tokenizer TOKENIZER_CHECKPOINTS = ["gpt2"] TINY_MODEL_CHECKPOINT = "gpt2" if is_tf_available(): class ModelToSave(tf.Module): def __init__(self, tokenizer): super().__init__() self.tokenizer = tokenizer config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT) self.model = TFGPT2LMHeadModel.from_config(config) @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),)) def serving(self, text): tokenized = self.tokenizer(text) input_ids_dense = tokenized["input_ids"].to_tensor() input_mask = tf.cast(input_ids_dense > 0, tf.int32) outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"] return outputs @require_tf @require_keras_nlp class GPTTokenizationTest(unittest.TestCase): def setUp(self): super().setUp() self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)] self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers) == len(self.tf_tokenizers) self.test_sentences = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1])) def test_output_equivalence(self): for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers): for test_inputs in self.test_sentences: python_outputs = tokenizer([test_inputs], return_tensors="tf") tf_outputs = tf_tokenizer([test_inputs]) for key in python_outputs.keys(): python_outputs_values = python_outputs[key].numpy() tf_outputs_values = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values)) @slow def test_graph_mode(self): for tf_tokenizer in self.tf_tokenizers: compiled_tokenizer = tf.function(tf_tokenizer) for test_inputs in self.test_sentences: test_inputs = tf.constant(test_inputs) compiled_outputs = compiled_tokenizer(test_inputs) eager_outputs = tf_tokenizer(test_inputs) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def test_saved_model(self): for tf_tokenizer in self.tf_tokenizers: model = ModelToSave(tokenizer=tf_tokenizer) test_inputs = tf.convert_to_tensor([self.test_sentences[0]]) out = model.serving(test_inputs) with TemporaryDirectory() as tempdir: save_path = Path(tempdir) / "saved.model" tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving}) loaded_model = tf.saved_model.load(save_path) loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"] self.assertTrue(tf.reduce_all(out == loaded_output)) @slow def test_from_config(self): for tf_tokenizer in self.tf_tokenizers: test_inputs = 
tf.convert_to_tensor([self.test_sentences[0]]) out = tf_tokenizer(test_inputs) config = tf_tokenizer.get_config() model_from_config = TFGPT2Tokenizer.from_config(config) from_config_output = model_from_config(test_inputs) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key])) @slow def test_padding(self): for tf_tokenizer in self.tf_tokenizers: tf_tokenizer.pad_token_id = 123123 for max_length in [3, 5, 1024]: test_inputs = tf.convert_to_tensor([self.test_sentences[0]]) out = tf_tokenizer(test_inputs, max_length=max_length) out_length = out["input_ids"].numpy().shape[1] assert out_length == max_length
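# A compact sketch of the component these tests exercise: TFGPT2Tokenizer (requires the
# keras-nlp extra) runs in-graph on string tensors and returns ragged integer tensors,
# so it can be traced with tf.function and exported inside a SavedModel alongside the
# model (the ModelToSave wrapper above shows the full export). The sentence below is an
# illustrative input.
import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf.constant(["This is a straightforward English test sentence."])
tokenized = tf_tokenizer(batch)
dense_ids = tokenized["input_ids"].to_tensor()  # densify the ragged batch for a model
attention_mask = tf.cast(dense_ids > 0, tf.int32)  # same mask recipe as ModelToSave.serving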
import tempfile import unittest import numpy as np import transformers from transformers import GPT2Tokenizer, GPTNeoConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gpt_neo.modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel if is_torch_available(): import torch class FlaxGPTNeoModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, attention_types=[[["global", "local"], 1]], intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, window_size=7, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_types = attention_types self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.window_size = window_size self.initializer_range = initializer_range self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = GPTNeoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, max_position_embeddings=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, attention_types=self.attention_types, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] 
* [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxGPTNeoModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPTNeoModel, FlaxGPTNeoForCausalLM) if is_flax_available() else () all_generative_model_classes = (FlaxGPTNeoForCausalLM,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPTNeoModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = jax.jit(model.generate) output_sequences = jit_generate( inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id ).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. 
I'm going to", ] self.assertListEqual(output_string, expected_string) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with 
tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("EleutherAI/gpt-neo-125M") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
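# A minimal sketch of the jitted-generation pattern from test_batch_generation above:
# jax.jit compiles the full generation loop once, and later calls with identical input
# shapes reuse the compiled program. Checkpoint and prompts mirror the test; left
# padding keeps the prompt tokens adjacent to the newly generated ones.
import jax
from transformers import FlaxGPTNeoForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True)
jit_generate = jax.jit(model.generate)
sequences = jit_generate(
    inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))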
""" Testing suite for the PyTorch GPT Neo model. """

import unittest

from transformers import GPTNeoConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
        GPT2Tokenizer,
        GPTNeoForCausalLM,
        GPTNeoForQuestionAnswering,
        GPTNeoForSequenceClassification,
        GPTNeoForTokenClassification,
        GPTNeoModel,
    )


class GPTNeoModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        attention_types=[[["global", "local"], 1]],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        window_size=7,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.window_size = window_size
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
        self.attention_types = attention_types

    def get_large_model_config(self):
        return GPTNeoConfig.from_pretrained("gpt-neo-125M")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return GPTNeoConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            max_position_embeddings=self.max_position_embeddings,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            window_size=self.window_size,
            attention_types=self.attention_types,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # past_key_values is not implemented
        # self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_gpt_neo_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)

        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_gpt_neo_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
attentionmaskattnmasklasthiddenstate outputfrompast modelnexttokens pastkeyvaluespast attentionmaskattnmasklasthiddenstate select random slice randomsliceidx idstensor1 outputfrompast shape1 item outputfromnopastslice outputfromnopast 1 randomsliceidx detach outputfrompastslice outputfrompast 0 randomsliceidx detach test that outputs are equal for slice self parent asserttruetorch allcloseoutputfrompastslice outputfromnopastslice atol1e3 def createandcheckgptneomodelpastlargeinputs self config inputids inputmask headmask tokentypeids args model gptneomodelconfigconfig model totorchdevice model eval first forward pass outputs modelinputids tokentypeidstokentypeids attentionmaskinputmask usecachetrue output past outputs totuple create hypothetical next token and extent to nextinputids nexttokens idstensorself batchsize 3 config vocabsize nexttokentypes idstensorself batchsize 3 self typevocabsize nextmask idstensorself batchsize 3 vocabsize2 append to next inputids and tokentypeids nextinputids torch catinputids nexttokens dim1 nexttokentypeids torch cattokentypeids nexttokentypes dim1 nextattentionmask torch catinputmask nextmask dim1 outputfromnopast model nextinputids tokentypeidsnexttokentypeids attentionmasknextattentionmask lasthiddenstate outputfrompast model nexttokens tokentypeidsnexttokentypes attentionmasknextattentionmask pastkeyvaluespast lasthiddenstate self parent asserttrueoutputfrompast shape1 nexttokens shape1 select random slice randomsliceidx idstensor1 outputfrompast shape1 item outputfromnopastslice outputfromnopast 3 randomsliceidx detach outputfrompastslice outputfrompast randomsliceidx detach test that outputs are equal for slice self parent asserttruetorch allcloseoutputfrompastslice outputfromnopastslice atol1e3 def createandchecklmheadmodelself config inputids inputmask headmask tokentypeids args model gptneoforcausallmconfig model totorchdevice model eval result modelinputids tokentypeidstokentypeids labelsinputids self parent assertequalresult loss shape self parent assertequalresult logits shape self batchsize self seqlength self vocabsize def createandcheckgptneoforquestionanswering self config inputids inputmask headmask tokentypeids mctokenids sequencelabels args config numlabels self numlabels model gptneoforquestionansweringconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids self parent assertequalresult startlogits shape self batchsize self seqlength self parent assertequalresult endlogits shape self batchsize self seqlength def createandcheckgptneoforsequenceclassification self config inputids inputmask headmask tokentypeids mctokenids sequencelabels args config numlabels self numlabels model gptneoforsequenceclassificationconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids labelssequencelabels self parent assertequalresult logits shape self batchsize self numlabels def createandcheckgptneofortokenclassification self config inputids inputmask headmask tokentypeids mctokenids sequencelabels args config numlabels self numlabels model gptneofortokenclassificationconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids self parent assertequalresult logits shape self batchsize self seqlength self numlabels def createandcheckforwardandbackwards self config inputids inputmask headmask tokentypeids args gradientcheckpointingfalse model gptneoforcausallmconfig if gradientcheckpointing model 
gradientcheckpointingenable model totorchdevice result modelinputids tokentypeidstokentypeids labelsinputids self parent assertequalresult loss shape self parent assertequalresult logits shape self batchsize self seqlength self vocabsize result loss backward def prepareconfigandinputsforcommonself configandinputs self prepareconfigandinputs config inputids inputmask headmask tokentypeids mctokenids sequencelabels tokenlabels choicelabels configandinputs inputsdict inputids inputids tokentypeids tokentypeids headmask headmask return config inputsdict requiretorch class gptneomodeltestmodeltestermixin generationtestermixin pipelinetestermixin unittest testcase allmodelclasses gptneomodel gptneoforcausallm gptneoforquestionanswering gptneoforsequenceclassification gptneofortokenclassification if istorchavailable else allgenerativemodelclasses gptneoforcausallm if istorchavailable else pipelinemodelmapping featureextraction gptneomodel questionanswering gptneoforquestionanswering textclassification gptneoforsequenceclassification textgeneration gptneoforcausallm tokenclassification gptneofortokenclassification zeroshot gptneoforsequenceclassification if istorchavailable else fxcompatible true testmissingkeys false testpruning false testmodelparallel false special case for doubleheads model def prepareforclassself inputsdict modelclass returnlabelsfalse inputsdict super prepareforclassinputsdict modelclass returnlabelsreturnlabels return inputsdict def setupself self modeltester gptneomodeltesterself self configtester configtesterself configclassgptneoconfig nembd37 def testconfigself self configtester runcommontests def testgptneomodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneomodelconfigandinputs def testgptneomodelpastself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneomodelpastconfigandinputs def testgptneomodelattmaskpastself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneomodelattentionmaskpastconfigandinputs def testgptneomodelpastlargeinputsself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneomodelpastlargeinputsconfigandinputs def testgptneolmheadmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandchecklmheadmodelconfigandinputs def testgptneoquestionansweringmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneoforquestionansweringconfigandinputs def testgptneosequenceclassificationmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneoforsequenceclassificationconfigandinputs def testgptneotokenclassificationmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckgptneofortokenclassificationconfigandinputs def testgptneogradientcheckpointingself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforwardandbackwardsconfigandinputs gradientcheckpointingtrue def gethiddenstatesself return torch tensor 0 4983 0 7584 1 6944 0 5440 2 6918 0 4206 0 4176 0 2055 0 0071 0 0405 1 4920 0 3630 1 0492 0 1599 1 7648 0 2419 1 8348 2 0514 0 1946 0 3203 0 7672 1 1600 1 7118 0 9056 0 2986 0 5372 0 7729 0 1927 0 0285 0 2629 1 1156 1 1992 dtypetorch float32 devicetorchdevice def testlocalattnprobsself model gptneomodel frompretrainedvalhallagptneorandomtiny eval layer model h1 attn attention totorchdevice 
hiddenstates self gethiddenstates hiddenstates torch cathiddenstates hiddenstates 0 5 dim2 batchsize seqlength hiddenstates shape masktokens 2 attentionmask torch onesbatchsize seqlength devicetorchdevice dtypetorch long attentionmask masktokens 0 dont attend last masktokens attentionmask attentionmask viewbatchsize 1 attentionmask attentionmask none none attentionmask 1 0 attentionmask 10000 0 attnprobs layerhiddenstates attentionmaskattentionmask outputattentionstrue1 the last 2 tokens are masked and should have 0 attnprobs self asserttruetorch allattnprobs masktokens masktokens 0 in loacal attention each token can only attend to the previous windowsize tokens inlcuding itself here windowsize is 4 so a token at index 5 can only attend to indcies 2 3 4 5 and the attnprobs should be 0 for token 0 1 self asserttruetorch allattnprobs 5 2 6 0 self asserttruetorch allattnprobs 5 2 0 requiretorch class gptneomodellanguagegenerationtestunittest testcase cachedproperty def modelself return gptneoforcausallm frompretrainedeleutheraigptneo1 3b totorchdevice cachedproperty def tokenizerself return gpt2tokenizer frompretrainedeleutheraigptneo1 3b slow def testlmgenerategptneoself for checkpointing in true false model self model if checkpointing model gradientcheckpointingenable else model gradientcheckpointingdisable inputids torch tensor464 3290 dtypetorch long devicetorchdevice the dog the dogeared copy of the book which is a collection of essays by the late expectedoutputids 464 3290 12 3380 4866 286 262 1492 11 543 318 257 4947 286 27126 416 262 2739 1772 11 fmt skip outputids model generateinputids dosamplefalse self assertlistequaloutputids0 tolist expectedoutputids slow def testgptneosampleself model self model tokenizer self tokenizer torch manualseed0 tokenized tokenizertoday is a nice day and returntensorspt returntokentypeidstrue inputids tokenized inputids totorchdevice outputids model generateinputids dosampletrue outputstr tokenizer decodeoutputids0 skipspecialtokenstrue expectedoutputstr today is a nice day and if you dont get the memo here is what you can self assertequaloutputstr expectedoutputstr slow def testbatchgenerationself model self model tokenizer self tokenizer tokenizer paddingside left define pad token eos token 50256 tokenizer padtoken tokenizer eostoken model config padtokenid model config eostokenid use different length sentences to test batching sentences hello my dog is a little today i am inputs tokenizersentences returntensorspt paddingtrue inputids inputsinputids totorchdevice outputs model generate inputidsinputids attentionmaskinputsattentionmask totorchdevice inputsnonpadded tokenizersentences0 returntensorspt inputids totorchdevice outputnonpadded model generateinputidsinputsnonpadded numpaddings inputsnonpadded shape1 inputsattentionmask1 long sum cpu item inputspadded tokenizersentences1 returntensorspt inputids totorchdevice outputpadded model generateinputidsinputspadded maxlengthmodel config maxlength numpaddings batchoutsentence tokenizer batchdecodeoutputs skipspecialtokenstrue nonpaddedsentence tokenizer decodeoutputnonpadded0 skipspecialtokenstrue paddedsentence tokenizer decodeoutputpadded0 skipspecialtokenstrue expectedoutputsentence hello my dog is a little bit of a kitty she is a very sweet and loving today i am going to talk about the best way to get a job in the self assertlistequalexpectedoutputsentence batchoutsentence self assertlistequalexpectedoutputsentence nonpaddedsentence paddedsentence slow def testmodelfrompretrainedself for modelname 
in gptneopretrainedmodelarchivelist 1 model gptneomodel frompretrainedmodelname self assertisnotnonemodel coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch gpt neo model past_key_values is not implemented self parent assertequal len result past_key_values config n_layer first forward pass create hypothetical next token and extent to next_input_ids append to next input_ids and token_type_ids select random slice test that outputs are equal for slice create attention mask first forward pass create hypothetical next token and extent to next_input_ids change a random masked slice from input_ids append to next input_ids and attn_mask get two different outputs select random slice test that outputs are equal for slice first forward pass create hypothetical next token and extent to next_input_ids append to next input_ids and token_type_ids select random slice test that outputs are equal for slice special case for doubleheads model dont attend last mask_tokens the last 2 tokens are masked and should have 0 attn_probs in loacal attention each token can only attend to the previous window_size tokens inlcuding itself here window_size is 4 so a token at index 5 can only attend to indcies 2 3 4 5 and the attn_probs should be 0 for token 0 1 the dog the dog eared copy of the book which is a collection of essays by the late fmt skip define pad token eos token 50256 use different length sentences to test batching
import unittest from transformers import GPTNeoConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2Tokenizer, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, ) class GPTNeoModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, attention_types=[[["global", "local"], 1]], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, window_size=7, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.window_size = window_size self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.attention_types = attention_types def get_large_model_config(self): return GPTNeoConfig.from_pretrained("gpt-neo-125M") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return GPTNeoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, 
num_heads=self.num_attention_heads, max_position_embeddings=self.max_position_embeddings, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, attention_types=self.attention_types, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_gpt_neo_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_neo_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask 
= torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_neo_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_gpt_neo_for_question_answering( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_gpt_neo_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt_neo_for_token_classification( self, config, input_ids, input_mask, 
head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPTNeoForCausalLM(config) if gradient_checkpointing: model.gradient_checkpointing_enable() model.to(torch_device) result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class GPTNeoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPTNeoModel, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPTNeoForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTNeoModel, "question-answering": GPTNeoForQuestionAnswering, "text-classification": GPTNeoForSequenceClassification, "text-generation": GPTNeoForCausalLM, "token-classification": GPTNeoForTokenClassification, "zero-shot": GPTNeoForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False test_model_parallel = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = GPTNeoModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gpt_neo_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_neo_model(*config_and_inputs) def test_gpt_neo_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_neo_model_past(*config_and_inputs) def test_gpt_neo_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_neo_model_attention_mask_past(*config_and_inputs) def test_gpt_neo_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_neo_model_past_large_inputs(*config_and_inputs) def test_gpt_neo_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gpt_neo_question_answering_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
        self.model_tester.create_and_check_gpt_neo_for_question_answering(*config_and_inputs)

    def test_gpt_neo_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_sequence_classification(*config_and_inputs)

    def test_gpt_neo_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_token_classification(*config_and_inputs)

    def test_gpt_neo_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def _get_hidden_states(self):
        return torch.tensor(
            [
                [
                    [0.4983, -0.7584, -1.6944, 0.5440],
                    [2.6918, 0.4206, 0.4176, 0.2055],
                    [-0.0071, -0.0405, -1.4920, -0.3630],
                    [1.0492, 0.1599, -1.7648, 0.2419],
                    [-1.8348, 2.0514, -0.1946, 0.3203],
                    [0.7672, -1.1600, -1.7118, -0.9056],
                    [0.2986, 0.5372, 0.7729, -0.1927],
                    [0.0285, 0.2629, -1.1156, -1.1992],
                ]
            ],
            dtype=torch.float32,
            device=torch_device,
        )

    def test_local_attn_probs(self):
        model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)

        batch_size, seq_length, _ = hidden_states.shape
        mask_tokens = 2
        attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # don't attend last mask_tokens

        attention_mask = attention_mask.view(batch_size, -1)
        attention_mask = attention_mask[:, None, None, :]
        attention_mask = (1.0 - attention_mask) * -10000.0

        attn_probs = layer(hidden_states, attention_mask=attention_mask, output_attentions=True)[-1]

        # the last 2 tokens are masked, and should have 0 attn_probs
        self.assertTrue(torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))

        # in local attention each token can only attend to the previous window_size tokens (including itself)
        # here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
        # and the attn_probs should be 0 for tokens [0, 1]
        self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
        self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))


@require_torch
class GPTNeoModelLanguageGenerationTest(unittest.TestCase):
    @cached_property
    def model(self):
        return GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B").to(torch_device)

    @cached_property
    def tokenizer(self):
        return GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

    @slow
    def test_lm_generate_gpt_neo(self):
        for checkpointing in [True, False]:
            model = self.model
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device)  # The dog
            # The dog-eared copy of the book, which is a collection of essays by the late
            expected_output_ids = [464, 3290, 12, 3380, 4866, 286, 262, 1492, 11, 543, 318, 257, 4947, 286, 27126, 416, 262, 2739, 1772, 11]  # fmt: skip
            output_ids = model.generate(input_ids, do_sample=False)
            self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

    @slow
    def test_gpt_neo_sample(self):
        model = self.model
        tokenizer = self.tokenizer

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = "Today is a nice day and if you don’t get the memo here is what you can"
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = self.model
        tokenizer = self.tokenizer

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id =
model.config.eos_token_id sentences = [ "Hello, my dog is a little", "Today, I am", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a kitty. She is a very sweet and loving", "Today, I am going to talk about the best way to get a job in the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GPTNeoModel.from_pretrained(model_name) self.assertIsNotNone(model)
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch GPTNeoX model. """
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class GPTNeoXModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return GPTNeoXConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_decoder(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() config.is_decoder = True return config, input_ids, input_mask, token_labels def create_and_check_model(self, config, input_ids, input_mask): model = GPTNeoXModel(config=config) model.to(torch_device) model.eval() _ = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def 
create_and_check_model_as_decoder(self, config, input_ids, input_mask): config.add_cross_attention = True model = GPTNeoXModel(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): model = GPTNeoXForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels): config.num_labels = self.num_labels model = GPTNeoXForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): config.is_decoder = True model = GPTNeoXForCausalLM(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) output_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class 
GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_model_parallel = False test_head_masking = False def setUp(self): self.model_tester = GPTNeoXModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_model_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_model_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_model_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) original_model = GPTNeoXModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = GPTNeoXModel(config) scaled_model.to(torch_device) scaled_model.eval() 
scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class GPTNeoXLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_gptneox(self): tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped") for checkpointing in [True, False]: model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped") if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure" output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, expected_output)
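# A minimal, hedged sketch of the cache-equivalence pattern exercised by
# `create_and_check_decoder_model_past_large_inputs` above: run the extended
# sequence once without a cache, then feed only the new tokens together with
# `past_key_values`, and check that the overlapping hidden states agree. The
# tiny config below is an illustrative assumption, not the tester's defaults.
import torch

from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

config = GPTNeoXConfig(
    vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=8, intermediate_size=128
)
model = GPTNeoXForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))

with torch.no_grad():
    # First pass caches the key/value states of the original 7 tokens.
    past_key_values = model(input_ids, use_cache=True).past_key_values
    # Reference pass: the full 10-token sequence, no cache involved.
    full = model(torch.cat([input_ids, next_tokens], dim=-1), output_hidden_states=True).hidden_states[-1]
    # Incremental pass: only the 3 new tokens, reusing the cache.
    cached = model(next_tokens, past_key_values=past_key_values, output_hidden_states=True).hidden_states[-1]

# The last three positions of the no-cache run should match the cached run.
assert torch.allclose(full[:, -3:], cached, atol=1e-3)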
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GPTNeoXJapanese model."""
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class GPTNeoXJapaneseModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_multiple_size = intermediate_multiple_size self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.weight_tying = weight_tying self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() config.is_decoder = True return config, input_ids, input_mask, token_labels def create_and_check_model(self, config, input_ids, input_mask): model = GPTNeoXJapaneseModel(config=config) model.to(torch_device) model.eval() _ = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder(self, config, input_ids, input_mask): config.add_cross_attention = True model = 
GPTNeoXJapaneseModel(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): config.is_decoder = True model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) output_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_model_parallel = False test_head_masking = False def setUp(self): self.model_tester = GPTNeoXJapaneseModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None 
self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_generation(self): model_id = "abeja/gpt-neox-japanese-2.7b" prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] EXPECTED_OUTPUTS = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id) model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id) predicted_outputs = [] for prompt in prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=50) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
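# A hedged construction sketch for the model exercised above: GPTNeoXJapanese's
# config differs from plain GPT-NeoX in exposing `intermediate_multiple_size`
# (the FFN width is `hidden_size` times this multiple) and a `weight_tying`
# flag. The tiny sizes mirror the tester's defaults; the shape check is only
# illustrative.
import torch

from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseForCausalLM

config = GPTNeoXJapaneseConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_multiple_size=4,
    weight_tying=True,
)
model = GPTNeoXJapaneseForCausalLM(config).eval()

with torch.no_grad():
    logits = model(torch.randint(0, config.vocab_size, (1, 7))).logits
assert logits.shape == (1, 7, config.vocab_size)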
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json import os import unittest from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import ( VOCAB_FILES_NAMES, GPTNeoXJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPTNeoXJapaneseTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} def setUp(self): super().setUp() vocab_tokens = [ "こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|startoftext|>", "<|endoftext|>", ] emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.emoji_file, "w") as emoji_writer: emoji_writer.write(json.dumps(emoji_tokens)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPTNeoXJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" output_text = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass def test_maximum_encoding_length_pair_input(self): pass def test_maximum_encoding_length_single_input(self): pass def test_full_tokenizer(self): tokenizer = self.get_tokenizer() input_text = "こんにちは、世界。 こんばんは、㔺界。" expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, expected_token) expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] input_ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(input_ids, expected_ids) input_tokens = tokens + [tokenizer.unk_token] expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] input_ids = tokenizer.convert_tokens_to_ids(input_tokens) self.assertListEqual(input_ids, expected_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b") ids_1 = tokenizer.encode("ありがとう。", add_special_tokens=False) ids_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1) encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2) assert encoded_sentence == ids_1 assert encoded_pair == ids_1 + ids_2 def test_conversion_reversible(self): pass def test_padding_different_model_input_name(self): pass
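# A hedged usage sketch of the fixture format built in `setUp` above: the vocab
# file holds one token per line, where a comma-separated line such as
# "世界,㔺界" maps character variants onto a single token, and the emoji file
# maps emoji to placeholder tokens and back. The file names and the pruned
# vocab below are illustrative assumptions.
import json
import os
import tempfile

from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

tmpdir = tempfile.mkdtemp()
vocab_path = os.path.join(tmpdir, "vocab.txt")
emoji_path = os.path.join(tmpdir, "emoji.json")
vocab_tokens = ["こん", "にちは", "、", "。", "世界,㔺界", "<SP>", "<unk>", "<|startoftext|>", "<|endoftext|>"]
with open(vocab_path, "w", encoding="utf-8") as f:
    f.write("".join(token + "\n" for token in vocab_tokens))
with open(emoji_path, "w", encoding="utf-8") as f:
    json.dump({"emoji": {"😀": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "😀"}}, f)

tokenizer = GPTNeoXJapaneseTokenizer(vocab_file=vocab_path, emoji_file=emoji_path, unk_token="<unk>")
# "㔺界" should map onto the same id as "世界" thanks to the comma-separated entry.
print(tokenizer.tokenize("こんにちは、㔺界。"))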
# coding=utf-8
# Copyright 2022 Hugging Face inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers import GPTSw3Tokenizer from transformers.testing_utils import get_tests_dir, require_jinja, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") @require_sentencepiece @require_tokenizers class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPTSw3Tokenizer test_rust_tokenizer = False test_sentencepiece = True test_sentencepiece_ignore_case = False def setUp(self): super().setUp() tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>") tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "This is a test" output_text = "This is a test" return input_text, output_text def test_convert_token_and_id(self): token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 2_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 2_000) def test_full_tokenizer(self): tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) def test_fast_encode_decode(self): tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB) texts = ["This is a test", "I was born in 92000, and this is falsé."] expected_ids_list = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] for text, expected_ids in zip(texts, expected_ids_list): self.assertListEqual(tokenizer.encode_fast(text), expected_ids) for text, token_ids in zip(texts, expected_ids_list): self.assertEqual(tokenizer.decode_fast(token_ids), text) @slow def test_tokenizer_integration(self): sequences = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences, ) @require_jinja def test_tokenization_for_chat(self): tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB) test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [2000, 1, 575, 541, 419, 530, 339, 265, 878, 708, 727, 275, 347, 541, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419], [2000, 1, 575, 541, 419, 530, 339, 265, 878, 708, 727, 275, 347, 541, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419, 984, 429, 281, 264, 1261, 291, 260, 1, 575, 541, 419], [2000, 1, 575, 541, 419, 984, 429, 281, 264, 1261, 291, 260, 1, 968, 263, 314, 419, 366, 354, 294, 360, 1, 575, 541, 419] ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens)
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile import unittest import numpy as np import transformers from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class FlaxGPTJModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.rotary_dim = rotary_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, 
past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPTJModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @tooslow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = jax.jit(model.generate) output_sequences = jit_generate( inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id ).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. 
I'm going to", ] self.assertListEqual(output_string, expected_string) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with 
tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @tooslow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
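# A hedged, tiny-scale sketch of the PT→Flax weight-conversion pattern used by
# `test_equivalence_pt_to_flax` above. The config sizes are illustrative
# assumptions; the 4e-2 tolerance mirrors the test.
import jax.numpy as jnp
import numpy as np
import torch

from transformers import FlaxGPTJForCausalLM, GPTJConfig, GPTJForCausalLM
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
pt_model = GPTJForCausalLM(config).eval()
fx_model = FlaxGPTJForCausalLM(config, dtype=jnp.float32)

# Port the randomly initialized PyTorch weights into the Flax model.
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

input_ids = np.ones((1, 7), dtype="i4")
with torch.no_grad():
    pt_logits = pt_model(torch.tensor(input_ids, dtype=torch.long)).logits.numpy()
fx_logits = np.asarray(fx_model(input_ids).logits)

assert np.allclose(pt_logits, fx_logits, atol=4e-2)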
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations import unittest from transformers import AutoTokenizer, GPTJConfig, is_tf_available from transformers.testing_utils import require_tf, slow, tooslow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.gptj.modeling_tf_gptj import ( TFGPTJForCausalLM, TFGPTJForQuestionAnswering, TFGPTJForSequenceClassification, TFGPTJModel, shape_list, ) class TFGPTJModelTester: def __init__(self, parent): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.rotary_dim = 4 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.bos_token_id = self.vocab_size - 1 self.eos_token_id = self.vocab_size - 1 self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, return_dict=True, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_gptj_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs = [input_ids, None, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_gptj_model_past(self, config, 
input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJModel(config=config) outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_gptj_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPTJModel(config=config) half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12) def create_and_check_gptj_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPTJModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] token_type_ids = token_type_ids[:1, :] self.batch_size = 1 outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) 
next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past_key_values, )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_gptj_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFGPTJModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFGPTJForCausalLM, TFGPTJForSequenceClassification, TFGPTJForQuestionAnswering, TFGPTJModel) if is_tf_available() else () ) all_generative_model_classes = (TFGPTJForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFGPTJModel, "question-answering": TFGPTJForQuestionAnswering, "text-classification": TFGPTJForSequenceClassification, "text-generation": TFGPTJForCausalLM, "zero-shot": TFGPTJForSequenceClassification, } if is_tf_available() else {} ) test_onnx = False test_pruning = False test_missing_keys = False test_head_masking = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def setUp(self): self.model_tester = TFGPTJModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTJConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gptj_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model(*config_and_inputs) def test_gptj_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_past(*config_and_inputs) def test_gptj_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_attention_mask_past(*config_and_inputs) def test_gptj_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_gptj_model_past_large_inputs(*config_and_inputs) def test_gptj_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_lm_head_model(*config_and_inputs) @slow @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) > 0, "skip testing on GPU for now to avoid GPU OOM.", ) def test_model_from_pretrained(self): model = TFGPTJModel.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True) self.assertIsNotNone(model) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") def test_resize_token_embeddings(self): super().test_resize_token_embeddings() @require_tf @tooslow class TFGPTJModelLanguageGenerationTest(unittest.TestCase): def test_lm_generate_gptj(self): model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True) input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32) expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) def test_gptj_sample(self): tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True) tokenized = tokenizer("Today is a nice day and", return_tensors="tf") with tf.device(":/CPU:0"): output_ids = model.generate(**tokenized, do_sample=True, seed=[42, 0]) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = "Today is a nice day and I’m going to go for a walk. I’" self.assertEqual(output_str, EXPECTED_OUTPUT_STR) def _get_beam_search_test_objects(self): model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id sentences = [ "Hello, my dog is a little", "Today, I", ] expected_output_sentences = [ "Hello, my dog is a little over a year old and has been diagnosed with hip dysplasia", "Today, I’m going to be talking about a topic that’", ] return model, tokenizer, sentences, expected_output_sentences def test_batch_beam_search(self): model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) outputs = model.generate(**inputs, do_sample=False, num_beams=2) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(expected_output_sentences, batch_out_sentence) def test_batch_left_padding(self): model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf") output_non_padded = model.generate(**inputs_non_padded, do_sample=False, num_beams=2) num_paddings = ( shape_list(inputs_non_padded["input_ids"])[-1] - tf.reduce_sum(tf.cast(inputs["attention_mask"][-1], tf.int64)).numpy() ) inputs_padded = tokenizer(sentences[1], return_tensors="tf") output_padded = model.generate( **inputs_padded, do_sample=False, num_beams=2, max_length=model.config.max_length - num_paddings ) 
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) self.assertListEqual(expected_output_sentences, [non_padded_sentence, padded_sentence]) def test_xla_beam_search(self): model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) xla_generate = tf.function(model.generate, jit_compile=True) outputs_xla = xla_generate(**inputs, do_sample=False, num_beams=2) xla_sentence = tokenizer.batch_decode(outputs_xla, skip_special_tokens=True) self.assertListEqual(expected_output_sentences, xla_sentence)
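

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the XLA test above
# works by wrapping `model.generate` in `tf.function(..., jit_compile=True)`.
# A minimal standalone usage looks like this; the checkpoint revision and the
# prompt string are assumptions for the example, mirroring the tests above.
def _example_xla_generate_sketch():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFGPTJForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16")
    model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True)

    inputs = tokenizer("Hello, my dog is a little", return_tensors="tf")
    # Compile once; later calls with the same input shapes reuse the XLA binary.
    xla_generate = tf.function(model.generate, jit_compile=True)
    outputs = xla_generate(**inputs, do_sample=False, num_beams=2)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)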
# coding=utf-8
# Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest import numpy as np from transformers import ( GPTSanJapaneseConfig, GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapaneseTokenizer, is_torch_available, ) from transformers.generation import GenerationConfig from transformers.testing_utils import require_torch, slow, tooslow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin class GPTSanJapaneseTester: def __init__( self, parent, vocab_size=99, batch_size=13, num_contexts=7, is_training=True, hidden_size=32, ext_size=42, num_hidden_layers=2, num_ext_layers=2, num_attention_heads=4, num_experts=2, d_ff=32, d_ext=80, d_spout=33, dropout_rate=0.0, layer_norm_epsilon=1e-6, expert_capacity=100, router_jitter_noise=0.0, ): self.vocab_size = vocab_size self.parent = parent self.batch_size = batch_size self.num_contexts = num_contexts self.seq_length = self.num_contexts self.is_training = is_training self.hidden_size = hidden_size self.num_ext_layers = num_ext_layers self.ext_size = ext_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_experts = num_experts self.d_ff = d_ff self.d_ext = d_ext self.d_spout = d_spout self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.expert_capacity = expert_capacity self.router_jitter_noise = router_jitter_noise def get_large_model_config(self): return GPTSanJapaneseConfig.from_pretrained("Tanrei/GPTSAN-japanese") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return (config, input_ids) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return (config, {"input_ids": input_ids}) def get_config(self): return GPTSanJapaneseConfig( vocab_size=self.vocab_size, num_contexts=self.seq_length, d_model=self.hidden_size, d_ff=self.d_ff, d_ext=self.d_ext, d_spout=self.d_spout, num_switch_layers=self.num_hidden_layers - self.num_ext_layers, num_ext_layers=self.num_ext_layers, num_heads=self.num_attention_heads, num_experts=self.num_experts, expert_capacity=self.expert_capacity, dropout_rate=self.dropout_rate, layer_norm_epsilon=self.layer_norm_epsilon, router_jitter_noise=self.router_jitter_noise, ) def create_and_check_model( self, config, input_ids, ): model = GPTSanJapaneseForConditionalGeneration(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, ) self.parent.assertIsNotNone(result) @require_torch class GPTSanJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GPTSanJapaneseModel,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": GPTSanJapaneseForConditionalGeneration, "feature-extraction": GPTSanJapaneseForConditionalGeneration, "summarization": GPTSanJapaneseForConditionalGeneration, "text2text-generation": GPTSanJapaneseForConditionalGeneration, "translation": GPTSanJapaneseForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False is_encoder_decoder = False test_pruning = False test_headmasking = False test_cpu_offload = False test_disk_offload = False test_save_load_fast_init_to_base = False test_training = False model_split_percents = [0.8, 0.9] def is_pipeline_test_to_skip( self, 
pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "SummarizationPipelineTests": return True elif pipeline_test_casse_name == "Text2TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = GPTSanJapaneseTester(self) self.config_tester = ConfigTester(self, config_class=GPTSanJapaneseConfig, d_model=37) def test_config(self): GPTSanJapaneseConfig() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip( reason="skip for now as the computed `max_memory` by `model_split_percents` in the test method will be changed inside `from_pretrained`" ) def test_model_parallelism(self): super().test_model_parallelism() @require_torch class GPTSanJapaneseForConditionalGenerationTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (GPTSanJapaneseForConditionalGeneration,) if is_torch_available() else () fx_compatible = False is_encoder_decoder = False test_pruning = False test_headmasking = False test_cpu_offload = False test_disk_offload = False model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = GPTSanJapaneseTester(self) self.config_tester = ConfigTester(self, config_class=GPTSanJapaneseConfig, d_model=37) def test_config(self): GPTSanJapaneseConfig() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip( reason="skip for now as the computed `max_memory` by `model_split_percents` in the test method will be changed inside `from_pretrained`" ) def test_model_parallelism(self): super().test_model_parallelism() @slow def test_logits(self): model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese") tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") input_ids = tokenizer.encode("武田信玄は", return_tensors="pt") outputs = model(input_ids) output_logits = outputs.logits.detach().cpu().numpy() target = [ [-12.037839889526367, -12.433061599731445, -14.333840370178223, -12.450345993041992, -11.1661376953125, -11.930137634277344, -10.659740447998047, -12.909574508666992, -13.241043090820312, -13.398579597473145, -11.107524871826172, -12.3685941696167, -22.97943115234375, -10.481067657470703, -12.484030723571777, -12.807360649108887, -14.769700050354004, -12.233579635620117, -13.428145408630371, -22.624177932739258], [-7.511149883270264, -8.281851768493652, -7.943127155303955, -7.55021333694458, -6.49869966506958, -7.586796283721924, -6.978085994720459, -7.839145183563232, -8.21964168548584, -8.695091247558594, -6.706910610198975, -6.6585798263549805, -19.565698623657227, -5.353842735290527, -8.350686073303223, -8.039388656616211, -10.856569290161133, -7.75154447555542, -8.819022178649902, -19.51532745361328], [-9.73066234588623, -10.223922729492188, -9.932981491088867, -11.857836723327637, -7.662626266479492, -11.13529109954834, -7.765097618103027, -11.472923278808594, -9.543149948120117, -11.905633926391602, -9.366164207458496, -11.5734281539917, -23.699003219604492, -9.429590225219727, -10.42839241027832, -10.585240364074707, -10.94771957397461, -11.095416069030762, -10.390240669250488, -23.769372940063477], [-9.728265762329102, -9.859712600708008, -10.09729290008545, -9.678522109985352, -6.879519939422607, -9.68487548828125, -4.2803425788879395, -10.018914222717285, -9.308445930480957, 
-10.63394546508789, -8.083646774291992, -9.06301498413086, -21.904266357421875, -8.90160846710205, -8.841876029968262, -11.856719970703125, -12.079398155212402, -11.233753204345703, -10.177338600158691, -21.87256622314453], [-9.669764518737793, -9.614198684692383, -9.814510345458984, -9.996501922607422, -11.375690460205078, -10.113405227661133, -10.546867370605469, -10.04369068145752, -10.907809257507324, -10.504216194152832, -11.129199028015137, -10.151124000549316, -21.96586799621582, -9.086349487304688, -11.730339050292969, -10.460667610168457, -10.298049926757812, -10.784148216247559, -10.840693473815918, -22.03152847290039], ] target = np.array(target).flatten() predict = output_logits[0, :, :20].flatten() def check(a, b, epsilon=5e-4): return abs(a - b) < epsilon * max(abs(a), abs(b)) self.assertTrue(np.all([check(target[i], predict[i]) for i in range(len(target))])) @slow def test_batch_generation(self): model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese") tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") model.to(torch_device) generation_config = GenerationConfig.from_pretrained("Tanrei/GPTSAN-japanese") generation_config.top_k = 1 sentences = [ "甲斐なら武田と言うほど", "織田信長は、", ] tokenizer.padding_side = "left" inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) self.assertNotEqual(inputs["attention_mask"][0].numpy().tolist(), inputs["attention_mask"][1].numpy().tolist()) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=3, generation_config=generation_config, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate( input_ids=inputs_non_padded, max_new_tokens=3, generation_config=generation_config ) inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=3, generation_config=generation_config) self.assertNotEqual(inputs_non_padded.shape, inputs_padded.shape) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "甲斐なら武田と言うほど甲斐の武田", "織田信長は、このような", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence]) @tooslow def test_sample(self): model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese") tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") target = [ ("武田信玄は", 35675), ("武田信玄は、", 45), ("武田信玄は、この", 29), ("武田信玄は、このよう", 30642), ("武田信玄は、このような", 35680), ("武田信玄は、このような「", 8640), ("武田信玄は、このような「武田", 31617), ("武田信玄は、このような「武田家", 30646), ("武田信玄は、このような「武田家の", 31617), ("武田信玄は、このような「武田家の家", 31381), ] for input, output in target: input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model(input_ids) output_logits = outputs.logits.detach().cpu().numpy()[0] output_id = np.argmax(output_logits[-1]) self.assertEqual(output_id, output) @slow def test_spout_generation(self): model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese") tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") model.to(torch_device) 
generation_config = GenerationConfig.from_pretrained("Tanrei/GPTSAN-japanese") generation_config.top_k = 1 input_text = "武田信玄は、" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(torch_device) input_ids_batch = tokenizer([input_text, input_text], return_tensors="pt").input_ids.to(torch_device) spouts = [ [0.87882208, 0.38426396, 0.33220248, 0.43890406, 0.16562252, 0.04803985, 0.211572 , 0.23188473, 0.37153068, 0.7836377 , 0.02160172, 0.38761719, 0.75290772, 0.90198857, 0.34365777, 0.64168169, 0.44318471, 0.14575746, 0.92562881, 0.40812148, 0.29019122, 0.88861599, 0.65524846, 0.43563456, 0.38177187, 0.70832965, 0.81527892, 0.68832812, 0.38833192, 0.4561522 , 0.14828817, 0.47248213, 0.54357335, 0.82009566, 0.1338884 , 0.02755417, 0.19764677, 0.2422084 , 0.04757674, 0.65409606, 0.0824589 , 0.03304383, 0.94387689, 0.98764509, 0.82433901, 0.27646741, 0.64907493, 0.76009406, 0.30087915, 0.17904689, 0.41601714, 0.67046398, 0.10422822, 0.08447374, 0.07354344, 0.61423565, 0.70284866, 0.7532333 , 0.1972038 , 0.29575659, 0.90583886, 0.29265307, 0.50000175, 0.70407655, 0.889363 , 0.81904418, 0.66829128, 0.64468815, 0.56563723, 0.85601875, 0.94924672, 0.00166762, 0.25220643, 0.74540219, 0.67993247, 0.1549675 , 0.39385352, 0.92153607, 0.63745931, 0.27759043, 0.84702295, 0.65904271, 0.58676614, 0.8666936 , 0.39607438, 0.79954983, 0.42220697, 0.39650381, 0.7849864 , 0.56150201, 0.15678925, 0.14746032, 0.34542114, 0.47026783, 0.11956489, 0.25421435, 0.33788901, 0.68934842, 0.36424685, 0.71737898, 0.38983449, 0.94393779, 0.39575588, 0.36616553, 0.87104665, 0.64630203, 0.22516905, 0.88270804, 0.15031338, 0.75144345, 0.46459025, 0.85396454, 0.86355643, 0.65139851, 0.70266061, 0.30241389, 0.81056497, 0.88865969, 0.38773807, 0.70635849, 0.90718459, 0.43245789, 0.28000654, 0.45935562, 0.08773519, 0.9552151 , 0.93901511, 0.22489288], [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], ] output1 = model.generate( input_ids=input_ids, spout=spouts[0], max_new_tokens=20, generation_config=generation_config, ) output2 = model.generate( input_ids=input_ids, spout=spouts[1], max_new_tokens=20, generation_config=generation_config, ) output3 = model.generate( input_ids=input_ids_batch, spout=spouts, max_new_tokens=20, generation_config=generation_config, ) out1_sentence = tokenizer.decode(output1[0]) out2_sentence = tokenizer.decode(output2[0]) batch_out_sentence = tokenizer.batch_decode(output3) expected_output_sentence = [ "武田信玄は、武田氏の滅亡後、武田氏の居城であった甲斐武田氏の居城である", "武田信玄は、武田家の滅亡を防ぐため、武田家の家臣である武田信虎を討", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [out1_sentence, out2_sentence]) @slow def test_prefix_lm_generation(self): model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese") tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") model.to(torch_device) generation_config = GenerationConfig.from_pretrained("Tanrei/GPTSAN-japanese") generation_config.top_k = 1 prefix_text_1 = "武田信玄" prefix_text_2 = "織田信長" input_text_1 = "は、" input_text_2 
= "が、" input_tok_1 = tokenizer(input_text_1, prefix_text=prefix_text_1, return_tensors="pt") input_tok_2 = tokenizer(input_text_2, prefix_text=prefix_text_2, return_tensors="pt") input_tok_3 = tokenizer([[prefix_text_1, input_text_1], [prefix_text_2, input_text_2]], return_tensors="pt") output1 = model.generate( input_ids=input_tok_1.input_ids.to(torch_device), token_type_ids=input_tok_1.token_type_ids.to(torch_device), max_new_tokens=20, generation_config=generation_config, ) output2 = model.generate( input_ids=input_tok_2.input_ids.to(torch_device), token_type_ids=input_tok_2.token_type_ids.to(torch_device), max_new_tokens=20, generation_config=generation_config, ) output3 = model.generate( input_ids=input_tok_3.input_ids.to(torch_device), token_type_ids=input_tok_3.token_type_ids.to(torch_device), attention_mask=input_tok_3.attention_mask.to(torch_device), max_new_tokens=20, generation_config=generation_config, ) out1_sentence = tokenizer.decode(output1[0]) out2_sentence = tokenizer.decode(output2[0]) batch_out_sentence = tokenizer.batch_decode(output3) expected_output_sentence = [ "武田信玄は、武田氏の祖である武田信虎を、その子・武田信友を擁して", "織田信長が、織田信長の妻・お市の方を妻として迎えたという逸話が残", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [out1_sentence, out2_sentence])
# coding=utf-8
# Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_jinja, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPTSanJapaneseTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} def setUp(self): super().setUp() vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.emoji_file, "w") as emoji_writer: emoji_writer.write(json.dumps(emoji_tokens)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" output_text = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass def test_maximum_encoding_length_pair_input(self): pass def test_maximum_encoding_length_single_input(self): pass def test_full_tokenizer(self): tokenizer = self.get_tokenizer() input_text = "こんにちは、世界。 こんばんは、㔺界。" expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, expected_token) expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] input_ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(input_ids, expected_ids) input_tokens = tokens + [tokenizer.unk_token] expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] input_ids = tokenizer.convert_tokens_to_ids(input_tokens) self.assertListEqual(input_ids, expected_ids) def test_token_bagging(self): tokenizer = self.get_tokenizer() input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。" expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。" tokens = tokenizer.encode(input_text) output_text = tokenizer.decode(tokens) self.assertEqual(output_text, expected_text) @slow def test_prefix_input(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") prefix_text = "こんにちは、世界。" input_text = "こんばんは、㔺界。😀" expected_text = "こんにちは、世界。こんばんは、世界。😀" tokens_1 = tokenizer.encode(prefix_text + input_text) tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text) tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text) output_text_1 = tokenizer.decode(tokens_1) output_text_2 = tokenizer.decode(tokens_2) output_text_3 = tokenizer.decode(tokens_3) self.assertEqual(output_text_1, expected_text) 
self.assertEqual(output_text_2, expected_text) self.assertEqual(output_text_3, expected_text) @slow def test_token_type_ids(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") prefix_text = "こんにちは、世界。" input_text = "こんばんは、㔺界。😀" len_prefix = len(tokenizer.encode(prefix_text)) - 2 len_text = len(tokenizer.encode(input_text)) - 2 expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1) expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0] expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1) type_id_1 = tokenizer(prefix_text + input_text).token_type_ids type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids self.assertListEqual(type_id_1, expected_mask_1) self.assertListEqual(type_id_2, expected_mask_2) self.assertListEqual(type_id_3, expected_mask_3) @slow def test_prefix_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") x_token_1 = tokenizer.encode("あンいワ") x_token_2 = tokenizer.encode("", prefix_text="あンいワ") x_token_3 = tokenizer.encode("いワ", prefix_text="あン") self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2)) self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3)) self.assertNotEqual(x_token_1, x_token_2) self.assertNotEqual(x_token_1, x_token_3) self.assertEqual(x_token_1[1], x_token_2[-1]) self.assertEqual(x_token_1[1], x_token_3[3]) @slow def test_batch_encode(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]] x_token = tokenizer(input_pairs, padding=True) x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True) expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] self.assertListEqual(x_token.input_ids, expected_outputs) self.assertListEqual(x_token.token_type_ids, expected_typeids) self.assertListEqual(x_token.attention_mask, expected_attmask) self.assertListEqual(x_token_2.input_ids, expected_outputs) self.assertListEqual(x_token_2.token_type_ids, expected_typeids) self.assertListEqual(x_token_2.attention_mask, expected_attmask) def test_conversion_reversible(self): pass def test_padding_different_model_input_name(self): pass @require_jinja def test_tokenization_for_chat(self): tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese") test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [35993, 35998, 35637, 35659, 35665, 35716, 35645, 35662, 35649, 35716, 35645, 35716, 35652, 35649, 35656, 35660, 35650, 35665, 35656, 35716, 35647, 35652, 35645, 35664, 35646, 35659, 35664, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999], [35993, 35998, 35637, 35659, 35665, 35716, 35645, 35662, 35649, 35716, 35645, 35716, 35652, 35649, 35656, 35660, 35650, 35665, 35656, 35716, 35647, 
35652, 35645, 35664, 35646, 35659, 35664, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999, 35993, 35998, 35626, 35653, 35647, 35649, 35716, 35664, 35659, 35716, 35657, 35649, 35649, 35664, 35716, 35669, 35659, 35665, 35595, 35716, 35999], [35993, 35998, 35626, 35653, 35647, 35649, 35716, 35664, 35659, 35716, 35657, 35649, 35649, 35664, 35716, 35669, 35659, 35665, 35595, 35716, 35999, 35993, 35998, 35620, 35649, 35656, 35656, 35659, 35582, 35716, 35999] ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens)
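

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): how the prefix
# tests above interpret `token_type_ids`. Tokens belonging to `prefix_text`
# (plus the leading separator token) are marked 1 and form the bidirectional
# segment; the causal continuation is marked 0. The example strings are the
# same ones `test_prefix_tokens` uses.
def _example_token_type_ids_sketch():
    tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    enc = tokenizer("いワ", prefix_text="あン")
    # e.g. token_type_ids == [1, 1, 1, 0, 0, ...]: 1 marks the prefix segment,
    # 0 marks the tokens to be predicted causally.
    return list(zip(enc.input_ids, enc.token_type_ids))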
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Graphormer model. """


import copy
import inspect
import os
import tempfile
import unittest

from transformers import GraphormerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import tensor

    from transformers import GraphormerForGraphClassification, GraphormerModel
    from transformers.models.graphormer.modeling_graphormer import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class GraphormerModelTester:
    def __init__(
        self,
        parent,
        num_classes=1,
        num_atoms=32 * 9,
        num_edges=32 * 3,
        num_in_degree=32,
        num_out_degree=32,
        num_spatial=32,
        num_edge_dis=16,
        multi_hop_max_dist=5,  # sometimes is 20
        spatial_pos_max=32,
        edge_type="multi_hop",
        init_fn=None,
        max_nodes=32,
        share_input_output_embed=False,
        num_hidden_layers=2,
        embedding_dim=32,
        ffn_embedding_dim=32,
        num_attention_heads=4,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        layerdrop=0.0,
        encoder_normalize_before=False,
        pre_layernorm=False,
        apply_graphormer_init=False,
        activation_fn="gelu",
        embed_scale=None,
        freeze_embeddings=False,
        num_trans_layers_to_freeze=0,
        traceable=False,
        q_noise=0.0,
        qn_block_size=8,
        kdim=None,
        vdim=None,
        bias=True,
        self_attention=True,
        batch_size=10,
        graph_size=20,
        is_training=True,
    ):
        self.parent = parent
        self.num_classes = num_classes
        self.num_labels = num_classes
        self.num_atoms = num_atoms
        self.num_in_degree = num_in_degree
        self.num_out_degree = num_out_degree
        self.num_edges = num_edges
        self.num_spatial = num_spatial
        self.num_edge_dis = num_edge_dis
        self.edge_type = edge_type
        self.multi_hop_max_dist = multi_hop_max_dist
        self.spatial_pos_max = spatial_pos_max
        self.max_nodes = max_nodes
        self.num_hidden_layers = num_hidden_layers
        self.embedding_dim = embedding_dim
        self.hidden_size = embedding_dim
        self.ffn_embedding_dim = ffn_embedding_dim
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.encoder_normalize_before = encoder_normalize_before
        self.pre_layernorm = pre_layernorm
        self.apply_graphormer_init = apply_graphormer_init
        self.activation_fn = activation_fn
        self.embed_scale = embed_scale
        self.freeze_embeddings = freeze_embeddings
        self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
        self.share_input_output_embed = share_input_output_embed
        self.traceable = traceable
        self.q_noise = q_noise
        self.qn_block_size = qn_block_size
        self.init_fn = init_fn
        self.kdim = kdim
        self.vdim = vdim
        self.self_attention = self_attention
        self.bias = bias
        self.batch_size = batch_size
        self.graph_size = graph_size
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        attn_bias = ids_tensor(
            [self.batch_size, self.graph_size + 1, self.graph_size + 1], self.num_atoms
        )  # Def not sure here
        attn_edge_type = ids_tensor([self.batch_size, self.graph_size, self.graph_size, 1], self.num_edges)
        spatial_pos = ids_tensor([self.batch_size, self.graph_size, self.graph_size], self.num_spatial)
        in_degree = ids_tensor([self.batch_size, self.graph_size], self.num_in_degree)
        out_degree = ids_tensor([self.batch_size, self.graph_size], self.num_out_degree)
        input_nodes = ids_tensor([self.batch_size, self.graph_size, 1], self.num_atoms)
        input_edges = ids_tensor(
            [self.batch_size, self.graph_size, self.graph_size, self.multi_hop_max_dist, 1], self.num_edges
        )
        labels = ids_tensor([self.batch_size], self.num_classes)

        config = self.get_config()

        return config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels

    def get_config(self):
        return GraphormerConfig(
            num_atoms=self.num_atoms,
            num_in_degree=self.num_in_degree,
            num_out_degree=self.num_out_degree,
            num_edges=self.num_edges,
            num_spatial=self.num_spatial,
            num_edge_dis=self.num_edge_dis,
            edge_type=self.edge_type,
            multi_hop_max_dist=self.multi_hop_max_dist,
            spatial_pos_max=self.spatial_pos_max,
            max_nodes=self.max_nodes,
            num_hidden_layers=self.num_hidden_layers,
            embedding_dim=self.embedding_dim,
            hidden_size=self.embedding_dim,
            ffn_embedding_dim=self.ffn_embedding_dim,
            num_attention_heads=self.num_attention_heads,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            layerdrop=self.layerdrop,
            encoder_normalize_before=self.encoder_normalize_before,
            pre_layernorm=self.pre_layernorm,
            apply_graphormer_init=self.apply_graphormer_init,
            activation_fn=self.activation_fn,
            embed_scale=self.embed_scale,
            freeze_embeddings=self.freeze_embeddings,
            num_trans_layers_to_freeze=self.num_trans_layers_to_freeze,
            share_input_output_embed=self.share_input_output_embed,
            traceable=self.traceable,
            q_noise=self.q_noise,
            qn_block_size=self.qn_block_size,
            init_fn=self.init_fn,
            kdim=self.kdim,
            vdim=self.vdim,
            self_attention=self.self_attention,
            bias=self.bias,
        )

    def create_and_check_model(
        self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels
    ):
        model = GraphormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_nodes=input_nodes,
            attn_bias=attn_bias,
            in_degree=in_degree,
            out_degree=out_degree,
            spatial_pos=spatial_pos,
            input_edges=input_edges,
            attn_edge_type=attn_edge_type,
            labels=labels,
        )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.graph_size + 1, self.hidden_size)
        )

    def create_and_check_for_graph_classification(
        self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels
    ):
        model = GraphormerForGraphClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_nodes=input_nodes,
            attn_bias=attn_bias,
            in_degree=in_degree,
            out_degree=out_degree,
            spatial_pos=spatial_pos,
            input_edges=input_edges,
            attn_edge_type=attn_edge_type,
            labels=labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            attn_bias,
            attn_edge_type,
            spatial_pos,
            in_degree,
            out_degree,
            input_nodes,
            input_edges,
            labels,
        ) = config_and_inputs
        inputs_dict = {
            "attn_bias": attn_bias,
            "attn_edge_type": attn_edge_type,
            "spatial_pos": spatial_pos,
            "in_degree": in_degree,
            "out_degree": out_degree,
            "input_nodes": input_nodes,
            "input_edges": input_edges,
            "labels": labels,
        }
        return config, inputs_dict


@require_torch
class GraphormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GraphormerForGraphClassification, GraphormerModel) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": GraphormerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_resize_embeddings = False
    main_input_name_nodes = "input_nodes"
    main_input_name_edges = "input_edges"
    has_attentions = False  # does not output attention

    def setUp(self):
        self.model_tester = GraphormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GraphormerConfig, has_text_modality=False)

    # overwrite from common as `Graphormer` requires more input arguments
    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            try:
                required_keys = (
                    "input_nodes",
                    "input_edges",
                    "attn_bias",
                    "in_degree",
                    "out_degree",
                    "spatial_pos",
                    "attn_edge_type",
                )
                required_inputs = tuple(inputs[k] for k in required_keys)
                model(*required_inputs)
                traced_model = torch.jit.trace(model, required_inputs)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                if layer_name in loaded_model_state_dict:
                    p2 = loaded_model_state_dict[layer_name]
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

            self.assertTrue(models_equal)

            # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
            # Even with this call, there are still memory leaks of ~0.04MB.
            self.clear_torch_jit_class_registry()

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Graphormer does not use one single inputs_embedding but three")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Graphormer does not implement feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(reason="Graphormer does not share input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertTrue(
                        -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            batch_size = self.model_tester.batch_size

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [batch_size, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Always returns hidden_states
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = False

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        outputs = model(**inputs_dict)

        output = outputs[0]

        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

    # Inputs are 'input_nodes' and 'input_edges' not 'input_ids'
    def test_model_main_input_name(self):
        for model_class in self.all_model_classes:
            model_signature = inspect.signature(getattr(model_class, "forward"))
            # The main input is the name of the argument after `self`
            observed_main_input_name_nodes = list(model_signature.parameters.keys())[1]
            observed_main_input_name_edges = list(model_signature.parameters.keys())[2]
            self.assertEqual(model_class.main_input_name_nodes, observed_main_input_name_nodes)
            self.assertEqual(model_class.main_input_name_edges, observed_main_input_name_edges)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_nodes", "input_edges"]
            self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_graph_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_graph_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GraphormerForGraphClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class GraphormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_graph_classification(self):
        model = GraphormerForGraphClassification.from_pretrained("clefourrier/graphormer-base-pcqm4mv2")

        # Actual real graph data from the MUTAG dataset: two molecular graphs, one with
        # 17 nodes and one with 13 nodes plus 4 padded nodes.
        # NOTE: the full inline literals for `attn_bias`, `attn_edge_type`, `spatial_pos`,
        # `input_nodes` and `input_edges` are elided here (their nesting could not be
        # recovered); their shapes and contents are summarized in the inline comments,
        # and the smaller tensors are reproduced in full.
        # fmt: off
        model_input = {
            "attn_bias": ...,       # float tensor [2, 18, 18]: zeros, with -inf over the padded nodes of graph 2
            "attn_edge_type": ...,  # int tensor [2, 17, 17, 1]: 0 for no edge, 3 for a bond
            "spatial_pos": ...,     # int tensor [2, 17, 17]: shortest-path distances, 0 on padding
            "in_degree": tensor([
                [3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2],
                [3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0],
            ]),
            "out_degree": tensor([
                [3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2],
                [3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0],
            ]),
            "input_nodes": ...,     # int tensor [2, 17, 1]: all atoms of type 3, 0 on padding
            "input_edges": ...,     # int tensor [2, 17, 17, 5, 1]: multi-hop edge features
            "labels": tensor([1, 0]),
        }
        # fmt: on

        output = model(**model_input)["logits"]

        expected_shape = torch.Size((2, 1))
        self.assertEqual(output.shape, expected_shape)

        expected_logs = torch.tensor(
            [[7.6060], [7.4126]]
        )

        self.assertTrue(torch.allclose(output, expected_logs, atol=1e-4))
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
# for the specific language governing permissions and limitations under the License.
""" Testing suite for the PyTorch Graphormer model. """
#
# Inline comments stripped from the code below, in source order:
# - multi_hop_max_dist: sometimes is 20
# - attn_bias: def not sure here
# - has_attentions = False: does not output attention
# - _create_and_check_torchscript: overwrite from common, as Graphormer requires more input arguments
# - _config_zero_init: to be sure we have no NaN
# - clear_torch_jit_class_registry: avoid memory leak; without this, each call increases RAM usage by ~20MB
#   (even with this call, there is still a memory leak of ~0.04MB)
# - test_retain_grad_hidden_states_attentions: the model always returns hidden_states; no need to test all models,
#   as different heads yield the same functionality
# - test_model_main_input_name: inputs are input_nodes and input_edges, not input_ids; the main input is the name
#   of the argument after `self`
# - test_forward_signature: signature.parameters is an OrderedDict, so arg_names order is deterministic
# - integration test: actual real graph data from the MUTAG dataset (tensor literals wrapped in `# fmt: off` /
#   `# fmt: on`)
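A note on the input format exercised by this suite: Graphormer consumes pre-processed graph tensors rather than token IDs. As a minimal sketch of a forward pass (the tiny config values and random integer features are illustrative assumptions mirroring the tester below, not taken from any checkpoint):

import torch
from transformers import GraphormerConfig, GraphormerForGraphClassification

batch_size, graph_size, multi_hop_max_dist = 2, 4, 5
config = GraphormerConfig(
    num_classes=1, num_hidden_layers=2, embedding_dim=32, ffn_embedding_dim=32, num_attention_heads=4
)
model = GraphormerForGraphClassification(config).eval()

inputs = {
    # one categorical feature column per node; index 0 is reserved for padding
    "input_nodes": torch.randint(1, 10, (batch_size, graph_size, 1)),
    # edge features along each hop of the shortest path between every node pair
    "input_edges": torch.randint(1, 4, (batch_size, graph_size, graph_size, multi_hop_max_dist, 1)),
    # +1 on both attention axes for the prepended graph token
    "attn_bias": torch.zeros(batch_size, graph_size + 1, graph_size + 1),
    "attn_edge_type": torch.randint(1, 4, (batch_size, graph_size, graph_size, 1)),
    "spatial_pos": torch.randint(1, 6, (batch_size, graph_size, graph_size)),
    "in_degree": torch.randint(1, 4, (batch_size, graph_size)),
    "out_degree": torch.randint(1, 4, (batch_size, graph_size)),
}
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([2, 1]) -> (batch_size, num_classes)

The tensor shapes match those built by prepare_config_and_inputs() in the tester that follows.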
import copy import inspect import os import tempfile import unittest from transformers import GraphormerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import tensor from transformers import GraphormerForGraphClassification, GraphormerModel from transformers.models.graphormer.modeling_graphormer import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST class GraphormerModelTester: def __init__( self, parent, num_classes=1, num_atoms=32 * 9, num_edges=32 * 3, num_in_degree=32, num_out_degree=32, num_spatial=32, num_edge_dis=16, multi_hop_max_dist=5, spatial_pos_max=32, edge_type="multi_hop", init_fn=None, max_nodes=32, share_input_output_embed=False, num_hidden_layers=2, embedding_dim=32, ffn_embedding_dim=32, num_attention_heads=4, dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, layerdrop=0.0, encoder_normalize_before=False, pre_layernorm=False, apply_graphormer_init=False, activation_fn="gelu", embed_scale=None, freeze_embeddings=False, num_trans_layers_to_freeze=0, traceable=False, q_noise=0.0, qn_block_size=8, kdim=None, vdim=None, bias=True, self_attention=True, batch_size=10, graph_size=20, is_training=True, ): self.parent = parent self.num_classes = num_classes self.num_labels = num_classes self.num_atoms = num_atoms self.num_in_degree = num_in_degree self.num_out_degree = num_out_degree self.num_edges = num_edges self.num_spatial = num_spatial self.num_edge_dis = num_edge_dis self.edge_type = edge_type self.multi_hop_max_dist = multi_hop_max_dist self.spatial_pos_max = spatial_pos_max self.max_nodes = max_nodes self.num_hidden_layers = num_hidden_layers self.embedding_dim = embedding_dim self.hidden_size = embedding_dim self.ffn_embedding_dim = ffn_embedding_dim self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.encoder_normalize_before = encoder_normalize_before self.pre_layernorm = pre_layernorm self.apply_graphormer_init = apply_graphormer_init self.activation_fn = activation_fn self.embed_scale = embed_scale self.freeze_embeddings = freeze_embeddings self.num_trans_layers_to_freeze = num_trans_layers_to_freeze self.share_input_output_embed = share_input_output_embed self.traceable = traceable self.q_noise = q_noise self.qn_block_size = qn_block_size self.init_fn = init_fn self.kdim = kdim self.vdim = vdim self.self_attention = self_attention self.bias = bias self.batch_size = batch_size self.graph_size = graph_size self.is_training = is_training def prepare_config_and_inputs(self): attn_bias = ids_tensor( [self.batch_size, self.graph_size + 1, self.graph_size + 1], self.num_atoms ) attn_edge_type = ids_tensor([self.batch_size, self.graph_size, self.graph_size, 1], self.num_edges) spatial_pos = ids_tensor([self.batch_size, self.graph_size, self.graph_size], self.num_spatial) in_degree = ids_tensor([self.batch_size, self.graph_size], self.num_in_degree) out_degree = ids_tensor([self.batch_size, self.graph_size], self.num_out_degree) input_nodes = ids_tensor([self.batch_size, self.graph_size, 1], self.num_atoms) input_edges = ids_tensor( [self.batch_size, self.graph_size, self.graph_size, self.multi_hop_max_dist, 1], self.num_edges ) labels = 
ids_tensor([self.batch_size], self.num_classes) config = self.get_config() return config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels def get_config(self): return GraphormerConfig( num_atoms=self.num_atoms, num_in_degree=self.num_in_degree, num_out_degree=self.num_out_degree, num_edges=self.num_edges, num_spatial=self.num_spatial, num_edge_dis=self.num_edge_dis, edge_type=self.edge_type, multi_hop_max_dist=self.multi_hop_max_dist, spatial_pos_max=self.spatial_pos_max, max_nodes=self.max_nodes, num_hidden_layers=self.num_hidden_layers, embedding_dim=self.embedding_dim, hidden_size=self.embedding_dim, ffn_embedding_dim=self.ffn_embedding_dim, num_attention_heads=self.num_attention_heads, dropout=self.dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, layerdrop=self.layerdrop, encoder_normalize_before=self.encoder_normalize_before, pre_layernorm=self.pre_layernorm, apply_graphormer_init=self.apply_graphormer_init, activation_fn=self.activation_fn, embed_scale=self.embed_scale, freeze_embeddings=self.freeze_embeddings, num_trans_layers_to_freeze=self.num_trans_layers_to_freeze, share_input_output_embed=self.share_input_output_embed, traceable=self.traceable, q_noise=self.q_noise, qn_block_size=self.qn_block_size, init_fn=self.init_fn, kdim=self.kdim, vdim=self.vdim, self_attention=self.self_attention, bias=self.bias, ) def create_and_check_model( self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels ): model = GraphormerModel(config=config) model.to(torch_device) model.eval() result = model( input_nodes=input_nodes, attn_bias=attn_bias, in_degree=in_degree, out_degree=out_degree, spatial_pos=spatial_pos, input_edges=input_edges, attn_edge_type=attn_edge_type, labels=labels, ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.graph_size + 1, self.hidden_size) ) def create_and_check_for_graph_classification( self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels ): model = GraphormerForGraphClassification(config) model.to(torch_device) model.eval() result = model( input_nodes=input_nodes, attn_bias=attn_bias, in_degree=in_degree, out_degree=out_degree, spatial_pos=spatial_pos, input_edges=input_edges, attn_edge_type=attn_edge_type, labels=labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels, ) = config_and_inputs inputs_dict = { "attn_bias": attn_bias, "attn_edge_type": attn_edge_type, "spatial_pos": spatial_pos, "in_degree": in_degree, "out_degree": out_degree, "input_nodes": input_nodes, "input_edges": input_edges, "labels": labels, } return config, inputs_dict @require_torch class GraphormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GraphormerForGraphClassification, GraphormerModel) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = {"feature-extraction": GraphormerModel} if is_torch_available() else {} test_pruning = False test_head_masking = False test_resize_embeddings = False main_input_name_nodes = "input_nodes" main_input_name_edges = "input_edges" has_attentions = False def setUp(self): self.model_tester = 
GraphormerModelTester(self) self.config_tester = ConfigTester(self, config_class=GraphormerConfig, has_text_modality=False) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: required_keys = ( "input_nodes", "input_edges", "attn_bias", "in_degree", "out_degree", "spatial_pos", "attn_edge_type", ) required_inputs = tuple(inputs[k] for k in required_keys) model(*required_inputs) traced_model = torch.jit.trace(model, required_inputs) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) self.clear_torch_jit_class_registry() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Graphormer does not use one single inputs_embedding but three") def test_inputs_embeds(self): pass @unittest.skip(reason="Graphormer does not implement feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="Graphormer does not share input and output embeddings") def test_model_common_attributes(self): pass def test_initialization(self): def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if
param.requires_grad: self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) batch_size = self.model_tester.batch_size self.assertListEqual( list(hidden_states[0].shape[-2:]), [batch_size, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = False model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) observed_main_input_name_nodes = list(model_signature.parameters.keys())[1] observed_main_input_name_edges = list(model_signature.parameters.keys())[2] self.assertEqual(model_class.main_input_name_nodes, observed_main_input_name_nodes) self.assertEqual(model_class.main_input_name_edges, observed_main_input_name_edges) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_nodes", "input_edges"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_graph_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_graph_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GraphormerForGraphClassification.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class GraphormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_graph_classification(self): model = GraphormerForGraphClassification.from_pretrained("clefourrier/graphormer-base-pcqm4mv2") model_input = { "attn_bias": tensor( [ [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], ], [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], [0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")], ], ] ), "attn_edge_type": tensor( [ [ [[0], [3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [3], [0], [3], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [3], [0], [3], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [3], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [3], [0], [0], [0]], [[0], [0], [0], [3], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [3], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [3], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [3], [3]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0]], ], [ [[0], [3], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0]], [[3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [3], [0], [3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [3], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [3], [0], [0], [0], [0], [0], [0]], [[3], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [3], [3], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], ], ] ), "spatial_pos": tensor( [ [ [1, 2, 3, 4, 3, 2, 4, 5, 6, 5, 6, 7, 8, 7, 9, 10, 10], [2, 1, 2, 3, 4, 3, 5, 6, 5, 4, 5, 6, 7, 6, 8, 9, 9], [3, 2, 1, 2, 3, 4, 4, 5, 4, 3, 4, 5, 6, 5, 7, 8, 8], [4, 3, 2, 1, 2, 3, 3, 4, 3, 2, 3, 4, 5, 4, 6, 7, 7], [3, 4, 3, 2, 1, 2, 2, 3, 4, 3, 4, 5, 6, 5, 7, 8, 8], [2, 3, 4, 3, 2, 1, 3, 4, 5, 4, 5, 6, 7, 6, 8, 9, 9], [4, 5, 4, 3, 2, 3, 1, 2, 3, 4, 5, 6, 5, 4, 6, 7, 7], [5, 6, 5, 4, 3, 4, 
2, 1, 2, 3, 4, 5, 4, 3, 5, 6, 6], [6, 5, 4, 3, 4, 5, 3, 2, 1, 2, 3, 4, 3, 2, 4, 5, 5], [5, 4, 3, 2, 3, 4, 4, 3, 2, 1, 2, 3, 4, 3, 5, 6, 6], [6, 5, 4, 3, 4, 5, 5, 4, 3, 2, 1, 2, 3, 4, 4, 5, 5], [7, 6, 5, 4, 5, 6, 6, 5, 4, 3, 2, 1, 2, 3, 3, 4, 4], [8, 7, 6, 5, 6, 7, 5, 4, 3, 4, 3, 2, 1, 2, 2, 3, 3], [7, 6, 5, 4, 5, 6, 4, 3, 2, 3, 4, 3, 2, 1, 3, 4, 4], [9, 8, 7, 6, 7, 8, 6, 5, 4, 5, 4, 3, 2, 3, 1, 2, 2], [10, 9, 8, 7, 8, 9, 7, 6, 5, 6, 5, 4, 3, 4, 2, 1, 3], [10, 9, 8, 7, 8, 9, 7, 6, 5, 6, 5, 4, 3, 4, 2, 3, 1], ], [ [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 4, 5, 5, 0, 0, 0, 0], [2, 1, 2, 3, 4, 5, 4, 3, 4, 3, 5, 6, 6, 0, 0, 0, 0], [3, 2, 1, 2, 3, 4, 3, 2, 3, 4, 4, 5, 5, 0, 0, 0, 0], [4, 3, 2, 1, 2, 3, 4, 3, 4, 5, 5, 6, 6, 0, 0, 0, 0], [5, 4, 3, 2, 1, 2, 3, 4, 5, 6, 6, 7, 7, 0, 0, 0, 0], [6, 5, 4, 3, 2, 1, 2, 3, 4, 5, 5, 6, 6, 0, 0, 0, 0], [5, 4, 3, 4, 3, 2, 1, 2, 3, 4, 4, 5, 5, 0, 0, 0, 0], [4, 3, 2, 3, 4, 3, 2, 1, 2, 3, 3, 4, 4, 0, 0, 0, 0], [3, 4, 3, 4, 5, 4, 3, 2, 1, 2, 2, 3, 3, 0, 0, 0, 0], [2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 3, 4, 4, 0, 0, 0, 0], [4, 5, 4, 5, 6, 5, 4, 3, 2, 3, 1, 2, 2, 0, 0, 0, 0], [5, 6, 5, 6, 7, 6, 5, 4, 3, 4, 2, 1, 3, 0, 0, 0, 0], [5, 6, 5, 6, 7, 6, 5, 4, 3, 4, 2, 3, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ], ] ), "in_degree": tensor( [ [3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2], [3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0], ] ), "out_degree": tensor( [ [3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2], [3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0], ] ), "input_nodes": tensor( [ [[3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3]], [[3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [0], [0], [0], [0]], ] ), "input_edges": tensor( [ [ [ [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], 
[0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], 
[0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [0], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], ], [ [ [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[0], [0], [0], [0], 
[0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], 
[0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [4]], [[4], [4], [4], [4], [0]], [[4], [4], [4], [0], [0]], [[4], [4], [0], [0], [0]], [[4], [4], [4], [0], [0]], [[4], [0], [0], [0], [0]], [[4], [4], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], [ [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]], ], ], ] ), "labels": tensor([1, 0]), } output = 
model(**model_input)["logits"] expected_shape = torch.Size((2, 1)) self.assertEqual(output.shape, expected_shape) expected_logs = torch.tensor( [[7.6060], [7.4126]] ) self.assertTrue(torch.allclose(output, expected_logs, atol=1e-4))
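One clarifying aside on the buffer bookkeeping in `_create_and_check_torchscript` above: an eager module's `state_dict()` omits non-persistent buffers, while a traced TorchScript module reports them, so the test filters those extra keys out before comparing the two state dicts. A self-contained toy illustration of that asymmetry (the `Toy` module is a hypothetical stand-in, not part of the test suite):

import torch
from torch import nn


class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("kept", torch.ones(2))                        # persistent: saved in state_dict
        self.register_buffer("scratch", torch.ones(2), persistent=False)   # non-persistent: omitted

    def forward(self, x):
        return x + self.kept + self.scratch


toy = Toy()
print(sorted(toy.state_dict()))     # ['kept'] -- 'scratch' is not serialized
traced = torch.jit.trace(toy, (torch.zeros(2),))
print(sorted(traced.state_dict()))  # ['kept', 'scratch'] -- the traced module keeps both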
foundbuffer false for i modelbuffer in enumeratemodelbuffers if torch equalnonpersistentbuffer modelbuffer foundbuffer true break self asserttruefoundbuffer modelbuffers popi modelsequal true for layername p1 in modelstatedict items p2 loadedmodelstatedictlayername if p1 data nep2 data sum 0 modelsequal false self asserttruemodelsequal def testloadvisiontextconfigself config inputsdict self modeltester prepareconfigandinputsforcommon save groupvitconfig and check if we can load groupvitvisionconfig from it with tempfile temporarydirectory as tmpdirname config savepretrainedtmpdirname visionconfig groupvitvisionconfig frompretrainedtmpdirname self assertdictequalconfig visionconfig todict visionconfig todict save groupvitconfig and check if we can load groupvittextconfig from it with tempfile temporarydirectory as tmpdirname config savepretrainedtmpdirname textconfig groupvittextconfig frompretrainedtmpdirname self assertdictequalconfig textconfig todict textconfig todict slow def testmodelfrompretrainedself for modelname in groupvitpretrainedmodelarchivelist 1 model groupvitmodel frompretrainedmodelname self assertisnotnonemodel we will verify our results on an image of cute cats def prepareimg url http images cocodataset orgval2017000000039769 jpg im image openrequests geturl streamtrue raw return im requirevision requiretorch class groupvitmodelintegrationtestunittest testcase slow def testinferenceself modelname nvidiagroupvitgccyfcc model groupvitmodel frompretrainedmodelname processor clipprocessor frompretrainedmodelname image prepareimg inputs processor texta photo of a cat a photo of a dog imagesimage paddingtrue returntensorspt forward pass with torch nograd outputs modelinputs verify the logits self assertequal outputs logitsperimage shape torch sizeinputs pixelvalues shape0 inputs inputids shape0 self assertequal outputs logitspertext shape torch sizeinputs inputids shape0 inputs pixelvalues shape0 expectedlogits torch tensor13 3523 6 3629 self asserttruetorch allcloseoutputs logitsperimage expectedlogits atol1e3 coding utf 8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch groupvit model no cls token for groupvit here we also overwrite some of the tests of test_modeling_common py as groupvit does not use input_ids inputs_embeds attention_mask and seq_length signature parameters is an ordereddict so arg_names order is deterministic groupvit returns attention grouping of each stage check that output_attentions also work using config groupvit returns attention grouping of each stage check attention is always last and order is fine groupvit returns attention grouping of each stage override since the attention mask from groupvit is not used to compute loss thus no grad no need to test all models as different heads yield the same functionality seq2seq models encoder decoder only models overwritten from parent as this equivalent test needs a specific seed and hard to get a good one override as the logit_scale parameter initilization is different for groupvit check if logit_scale is 
initilized as per the original implementation to be sure we have no nan groupvit needs pixel_values save groupvitconfig and check if we can load groupvitvisionconfig from it save groupvitconfig and check if we can load groupvittextconfig from it we will verify our results on an image of cute cats forward pass verify the logits
import inspect
import os
import random
import tempfile
import unittest

import numpy as np
import requests

from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from transformers.testing_utils import is_pt_tf_cross_test, require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import GroupViTModel, GroupViTTextModel, GroupViTVisionModel
    from transformers.models.groupvit.modeling_groupvit import GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import CLIPProcessor


class GroupViTVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        depths=[6, 3, 3],
        num_group_tokens=[64, 8, 0],
        num_output_groups=[64, 8, 8],
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.depths = depths
        self.num_hidden_layers = sum(depths)
        self.expected_num_hidden_layers = len(depths) + 1
        self.num_group_tokens = num_group_tokens
        self.num_output_groups = num_output_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        num_patches = (image_size // patch_size) ** 2
        # no [CLS] token for GroupViT
        self.seq_length = num_patches

    def prepare_config_and_inputs(self):
        rng = random.Random(0)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng)
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return GroupViTVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            depths=self.depths,
            num_group_tokens=self.num_group_tokens,
            num_output_groups=self.num_output_groups,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = GroupViTVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class GroupViTVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as GroupViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (GroupViTVisionModel,) if is_torch_available() else ()

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GroupViTVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="GroupViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import tensorflow as tf

        seed = 338
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)

        expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens))

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(attentions), expected_num_attention_outputs)

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(self_attentions), expected_num_attention_outputs)
            for i, self_attn in enumerate(self_attentions):
                if self_attn is None:
                    continue

                self.assertListEqual(
                    list(self_attentions[i].shape[-2:]),
                    [
                        self.model_tester.num_output_groups[i],
                        self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len,
                    ],
                )

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="GroupViTVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    # override since the attention mask from GroupViT is not used to compute loss, thus no grad
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions.retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions.grad)
                self.assertIsNotNone(decoder_attentions.grad)
                self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()

            if self.has_attentions:
                attentions = outputs.attentions[0]
                attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(hidden_states.grad)

            if self.has_attentions:
                self.assertIsNone(attentions.grad)

    @slow
    def test_model_from_pretrained(self):
        for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GroupViTVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class GroupViTTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        rng = random.Random(0)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return GroupViTTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GroupViTTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GroupViTTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (GroupViTTextModel,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GroupViTTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="GroupViTTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GroupViTTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class GroupViTModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = GroupViTTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = GroupViTVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return GroupViTConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = GroupViTModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GroupViTModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {}
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = GroupViTModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="hidden_states are tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="input_embeds are tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="GroupViTModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    # overwritten from parent as this equivalent test needs a specific `seed` and it is hard to get a good one!
    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-5, name="outputs", attributes=None):
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import tensorflow as tf

        seed = 163
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    # override as the `logit_scale` parameter initialization is different for GroupViT
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # GroupViT needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save GroupViTConfig and check if we can load GroupViTVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = GroupViTVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save GroupViTConfig and check if we can load GroupViTTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = GroupViTTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GroupViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_torch
class GroupViTModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
        model = GroupViTModel.from_pretrained(model_name)
        processor = CLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
        )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        expected_logits = torch.tensor([[13.3523, 6.3629]])

        self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
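For context on how the `logits_per_image` values asserted in the integration test above are consumed downstream, here is a minimal sketch (not part of the test suite) of zero-shot classification with the same `nvidia/groupvit-gcc-yfcc` checkpoint, image, and prompts as the test: a softmax over the text prompts converts the image-text similarity logits into per-prompt probabilities.

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTModel

model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
processor = CLIPProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)

with torch.no_grad():
    outputs = model(**inputs)

# softmax over the text prompts turns the similarity logits into per-image probabilities
probs = outputs.logits_per_image.softmax(dim=-1)
print(probs)  # the "cat" prompt should dominate, given the expected logits [[13.3523, 6.3629]]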
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow GroupViT model. """
from __future__ import annotations

import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module

import numpy as np
import requests

from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tensorflow_probability,
    require_tf,
    require_vision,
    slow,
)
from transformers.utils import is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFGroupViTModel, TFGroupViTTextModel, TFGroupViTVisionModel, TFSharedEmbeddings
    from transformers.models.groupvit.modeling_tf_groupvit import TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import CLIPProcessor


class TFGroupViTVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        depths=[6, 3, 3],
        num_group_tokens=[64, 8, 0],
        num_output_groups=[64, 8, 8],
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.depths = depths
        self.num_hidden_layers = sum(depths)
        self.expected_num_hidden_layers = len(depths) + 1
        self.num_group_tokens = num_group_tokens
        self.num_output_groups = num_output_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        num_patches = (image_size // patch_size) ** 2
        # no [CLS] token for GroupViT
        self.seq_length = num_patches

    def prepare_config_and_inputs(self):
        rng = random.Random(0)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng)
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return GroupViTVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            depths=self.depths,
            num_group_tokens=self.num_group_tokens,
            num_output_groups=self.num_output_groups,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TFGroupViTVisionModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFGroupViTVisionModelTest(TFModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as GroupViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFGroupViTVisionModel,) if is_tf_available() else ()

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="GroupViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    # During saving, TensorFlow will also run with `training=True` which triggers `gumbel_softmax` that requires
    # `tensorflow-probability`.
    @require_tensorflow_probability
    @slow
    def test_saved_model_creation(self):
        super().test_saved_model_creation()

    @unittest.skip(reason="GroupViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)

        expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens))

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(attentions), expected_num_attention_outputs)

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            # GroupViT returns attention grouping of each stage
            self.assertEqual(len(self_attentions), expected_num_attention_outputs)

            for i, self_attn in enumerate(self_attentions):
                if self_attn is None:
                    continue

                self.assertListEqual(
                    list(self_attentions[i].shape[-2:]),
                    [
                        self.model_tester.num_output_groups[i],
                        self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len,
                    ],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = getattr(self.model_tester, "seq_length", None)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        # `GroupViT` computes some indices using argmax, and uses them as one-hot encoding for further computation.
        # The problem is: while PT/TF have a very small difference in `y_soft` (~ 1e-9), the argmax could be totally
        # different, if there are at least 2 indices with almost identical values. This leads to a very large
        # difference in the outputs. We need specific seeds to avoid almost identical values happening in `y_soft`.
        import torch

        seed = 338
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFGroupViTVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(
        "TFGroupViTVisionModel does not convert `hidden_states` and `attentions` to tensors as they are all of"
        " different dimensions, and we get `Got a non-Tensor value` error when saving the model."
    )
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        seq_len = getattr(self.model_tester, "seq_length", None)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]

                # Check num outputs
                self.assertEqual(len(outputs), num_out)

                # Check num layers
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)

                # Check attention outputs
                image_size = (self.model_tester.image_size, self.model_tester.image_size)
                patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
                num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
                seq_len = num_patches + 1

                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

                # Check hidden states
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [seq_len, self.model_tester.hidden_size],
                )


class TFGroupViTTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        rng = random.Random(0)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
            # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there
            # is still at least one token being attended to for each batch.
            # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team.
            input_mask = tf.concat(
                [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1
            )

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return GroupViTTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFGroupViTTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFGroupViTTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFGroupViTTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFGroupViTTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]

                # Check number of outputs
                self.assertEqual(len(outputs), num_out)

                # Check number of layers
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                # Check hidden states
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # Check attention outputs
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)

                seq_length = self.model_tester.seq_length
                key_length = getattr(self.model_tester, "key_length", seq_length)

                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_length, key_length],
                )


class TFGroupViTModelTester:
    def __init__(self, parent, is_training=True):
        self.parent = parent
        self.text_model_tester = TFGroupViTTextModelTester(parent)
        self.vision_model_tester = TFGroupViTVisionModelTester(parent)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return GroupViTConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFGroupViTModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_tf
class TFGroupViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFGroupViTModel,) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFGroupViTModel} if is_tf_available() else {}
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="hidden_states are tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="input_embeds are tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="TFGroupViTModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @require_tensorflow_probability
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        # `GroupViT` computes some indices using argmax, and uses them as one-hot encoding for further computation.
        # The problem is: while PT/TF have a very small difference in `y_soft` (~ 1e-9), the argmax could be totally
        # different, if there are at least 2 indices with almost identical values. This leads to a very large
        # difference in the outputs. We need specific seeds to avoid almost identical values happening in `y_soft`.
        import torch

        seed = 158
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    # overwrite from common since `TFGroupViTModelTester` sets `return_loss` to `True`, which causes the preparation
    # of `symbolic_inputs` to fail.
    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # remove `return_loss` to make code work
        if self.__class__.__name__ == "TFGroupViTModelTest":
            inputs_dict.pop("return_loss", None)

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFGroupViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.")
    @slow
    def test_saved_model_creation(self):
        pass

    @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.")
    @slow
    def test_prepare_serving_output(self):
        pass


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_tf
class TFGroupViTModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
        model = TFGroupViTModel.from_pretrained(model_name)
        processor = CLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf"
        )

        outputs = model(**inputs, training=False)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        expected_logits = tf.constant([[13.3523, 6.3629]])

        tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
= use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): rng = random.Random(0) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_mask = tf.concat( [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1 ) config = self.get_config() return config, input_ids, input_mask def get_config(self): return GroupViTTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFGroupViTTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFGroupViTTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFGroupViTTextModel,) if is_tf_available() else () test_pruning = False test_head_masking = False test_onnx = False def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def setUp(self): self.model_tester = TFGroupViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFGroupViTTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, 
"saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) seq_length = self.model_tester.seq_length key_length = getattr(self.model_tester, "key_length", seq_length) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, key_length], ) class TFGroupViTModelTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = TFGroupViTTextModelTester(parent) self.vision_model_tester = TFGroupViTVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return GroupViTConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFGroupViTModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_tf class TFGroupViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFGroupViTModel,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFGroupViTModel} if is_tf_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def setUp(self): self.model_tester = TFGroupViTModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="hidden_states are tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="input_embeds are tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass @require_tensorflow_probability @slow def 
test_keras_fit(self): super().test_keras_fit() @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): import torch seed = 158 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if self.__class__.__name__ == "TFGroupViTModelTest": inputs_dict.pop("return_loss", None) tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and tf.keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: if "T5" in main_layer_class.__name__: shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = tf.keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = tf.keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, tf.keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) @slow def test_model_from_pretrained(self): for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFGroupViTModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation(self): pass @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.") @slow def test_prepare_serving_output(self): pass def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_tf class TFGroupViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "nvidia/groupvit-gcc-yfcc" model = TFGroupViTModel.from_pretrained(model_name) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf" ) outputs = model(**inputs, training=False) self.assertEqual( outputs.logits_per_image.shape, tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = tf.constant([[13.3523, 6.3629]]) tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
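# Illustrative usage sketch (not part of the test suite): the integration test above
# checks raw similarity logits; a typical zero-shot classification flow would softmax
# `logits_per_image` over the candidate captions. The caption strings below are
# arbitrary examples, and the checkpoint/processor are the ones already used in
# `TFGroupViTModelIntegrationTest`.
def _zero_shot_classification_sketch(image, candidate_captions=("a photo of a cat", "a photo of a dog")):
    model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
    processor = CLIPProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
    inputs = processor(text=list(candidate_captions), images=image, padding=True, return_tensors="tf")
    outputs = model(**inputs, training=False)
    # `logits_per_image` has shape (num_images, num_captions); a softmax over the
    # last axis turns the similarity scores into per-image caption probabilities.
    return tf.nn.softmax(outputs.logits_per_image, axis=-1)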
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Allegro.pl and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note: the tests below use a simpler text fixture without Japanese/Chinese characters.
import json import os import unittest from transformers import HerbertTokenizer, HerbertTokenizerFast from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES from transformers.testing_utils import get_tests_dir, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = HerbertTokenizer rust_tokenizer_class = HerbertTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() with open(f"{get_tests_dir()}/fixtures/sample_text_no_unicode.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() vocab = [ "<s>", "</s>", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", ",</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(vocab_file=self.vocab_file, merges_file=self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [16, 17, 23] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "lower,newer" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allegro/herbert-base-cased") text = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False) text_2 = tokenizer.encode("konstruowanie wielu sekwencji", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [2] assert encoded_pair == [0] + text + [2] + text_2 + [2] @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer_with_special_tokens_change(self): pass @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer(self): pass
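# Illustrative sketch (not part of the test suite): how the toy fixture above drives
# BPE segmentation. Applying the merges "l o" -> "lo", "lo w" -> "low" and
# "e r</w>" -> "er</w>" in rank order to the characters of "lower" reproduces the
# ["low", "er</w>"] split asserted in `test_full_tokenizer`. Real BPE repeatedly
# merges the best-ranked adjacent pair; the greedy single pass per merge below is a
# simplification that happens to coincide on this example.
def _toy_bpe_sketch(word="lower"):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # ["l", "o", "w", "e", "r</w>"]
    for a, b in [("l", "o"), ("lo", "w"), ("e", "r</w>")]:  # merge ranks from the fixture
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols  # -> ["low", "er</w>"]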
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Hubert model."""
import math import os import pickle import tempfile import unittest import pytest from transformers import HubertConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from transformers.utils import is_torch_fx_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( HubertForCTC, HubertForSequenceClassification, HubertModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace class HubertModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return HubertConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, 
initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
            do_stable_layer_norm=self.do_stable_layer_norm,
        )

    def create_and_check_model(self, config, input_values, attention_mask):
        model = HubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )

    def create_and_check_batch_inference(self, config, input_values, *args):
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = HubertModel(config=config)
        model.to(torch_device)
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0

        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state

        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state

            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))

    def check_ctc_loss(self, config, input_values, *args):
        model = HubertForCTC(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(sum_loss, float))
        self.parent.assertTrue(isinstance(mean_loss, float))

    def check_seq_classifier_loss(self, config, input_values, *args):
        model = HubertForSequenceClassification(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        unmasked_loss = model(input_values, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(masked_loss, float))
        self.parent.assertTrue(isinstance(unmasked_loss, float))
        self.parent.assertTrue(masked_loss != unmasked_loss)

    def check_ctc_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = HubertForCTC(config=config)
        model.to(torch_device)
        model.train()

        # freeze feature encoder
        model.freeze_feature_encoder()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_seq_classifier_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = HubertForSequenceClassification(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_labels_out_of_vocab(self, config, input_values, *args):
        model = HubertForCTC(config)
        model.to(torch_device)
        model.train()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        # labels are drawn from beyond the vocabulary on purpose; the model must raise
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)

        with pytest.raises(ValueError):
            model(input_values, labels=labels)

    def prepare_config_and_inputs_for_common(self):
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class HubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "audio-classification": HubertForSequenceClassification,
            "automatic-speech-recognition": HubertForCTC,
            "feature-extraction": HubertModel,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = HubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)

    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)

    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)

    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

    # Hubert has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # `input_ids` is renamed to `input_values`
    def test_forward_signature(self):
        pass

    # Hubert cannot resize token embeddings since it has no tokens embeddings
    def test_resize_tokens_embeddings(self):
        pass

    # Hubert has no inputs_embeds and thus the `get_input_embeddings` fn is not implemented
    def test_model_common_attributes(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): self.skipTest("torch 2.1 breaks torch fx tests for wav2vec2/hubert.") if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = 
traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = HubertModel.from_pretrained("facebook/hubert-base-ls960") self.assertIsNotNone(model) @require_torch class HubertRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else () test_pruning = False test_headmasking = False def setUp(self): self.model_tester = HubertModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft") self.assertIsNotNone(model) @require_torch class HubertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = 
torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_soundfile @slow class HubertModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): from datasets import load_dataset ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_batched(self): model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft", torch_dtype=torch.float16).to( torch_device ) processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_keyword_spotting(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-ks", torch_dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [2, 6, 10, 9] expected_logits = torch.tensor([7.6692, 17.7795, 11.1562, 11.8232], dtype=torch.float16, device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=3e-2)) def test_inference_intent_classification(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-ic", torch_dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [1, 0, 4, 3] expected_logits_action = torch.tensor( [5.9052, 12.5865, 4.4840, 10.0240], 
dtype=torch.float16, device=torch_device ) expected_labels_object = [1, 10, 3, 4] expected_logits_object = torch.tensor( [5.5316, 11.7946, 8.1672, 23.2415], dtype=torch.float16, device=torch_device ) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor( [5.2053, 8.9577, 10.0447, 8.1481], dtype=torch.float16, device=torch_device ) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=3e-1)) self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=3e-1)) self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=3e-1)) def test_inference_speaker_identification(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-sid", torch_dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.half().to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [5, 1, 1, 3] expected_logits = torch.tensor( [78231.5547, 123166.6094, 122785.4141, 84851.2969], dtype=torch.float16, device=torch_device ) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=10)) def test_inference_emotion_recognition(self): model = HubertForSequenceClassification.from_pretrained( "superb/hubert-base-superb-er", torch_dtype=torch.float16 ).to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.half().to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] expected_logits = torch.tensor([2.8384, 2.3389, 3.8564, 4.5558], dtype=torch.float16, device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-1)) def test_inference_distilhubert(self): model = HubertModel.from_pretrained("ntu-spml/distilhubert").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("ntu-spml/distilhubert") input_speech = self._load_datasamples(1) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state expected_outputs_first = torch.tensor( [ [ [-0.3505, 0.1167, 0.0608, 0.1294], [-0.3085, 0.0481, 0.1106, 0.0955], [-0.3107, -0.0391, 0.0739, 0.1360], [-0.2385, -0.1795, -0.0928, 0.2389], ] ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [-0.0732, 0.0255, 0.0529, -0.1372], [-0.0812, 0.1259, 0.0564, -0.0438], [-0.0054, 0.0758, -0.0002, 
-0.1617], [0.0133, -0.0320, -0.0687, 0.0062], ] ], device=torch_device, ) expected_output_sum = -3776.0730 self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3)) self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3)) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 0.1)
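# The CTC tests above repeatedly size their label tensors with
# `_get_feat_extract_output_lengths`, which applies the standard 1-D
# convolution length formula once per feature-extractor layer. A minimal
# sketch of that arithmetic, assuming the default Hubert kernel/stride
# configuration (the tester above uses smaller illustrative values;
# `conv_output_length_sketch` is a hypothetical helper name):
def conv_output_length_sketch(input_length, conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_stride=(5, 2, 2, 2, 2, 2, 2)):
    for kernel, stride in zip(conv_kernel, conv_stride):
        # 1-D conv output length: floor((length - kernel) / stride) + 1
        input_length = (input_length - kernel) // stride + 1
    return input_length

# For one second of 16 kHz audio this yields 49 frames, i.e. roughly one
# encoder frame every 20 ms:
assert conv_output_length_sketch(16000) == 49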
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
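# Unlike the PyTorch tests above, the TF testers below cannot assign into
# tensor slices, so padding is applied multiplicatively with
# `tf.sequence_mask`. A minimal sketch of that pattern, assuming the same
# (batch=3, seq_len=1024) shapes the tester uses (`padded_values` and
# `attention_mask` are illustrative names):
import tensorflow as tf

input_values = tf.random.normal((3, 1024))
input_lengths = tf.constant([256, 512, 1024])
length_mask = tf.sequence_mask(input_lengths, maxlen=1024, dtype=tf.float32)
padded_values = input_values * length_mask       # zeros out samples past each length
attention_mask = tf.cast(length_mask, tf.int32)  # 1 for real audio, 0 for padding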
from __future__ import annotations import copy import inspect import math import os import tempfile import unittest import numpy as np import pytest from transformers import is_tf_available from transformers.testing_utils import is_pt_tf_cross_test, require_soundfile, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import HubertConfig, TFHubertForCTC, TFHubertModel, Wav2Vec2Processor from transformers.models.hubert.modeling_tf_hubert import _compute_mask_indices @require_tf class TFHubertModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = tf.cast(ids_tensor([self.batch_size, self.seq_length], 32768), tf.float32) / 32768.0 attention_mask = tf.ones_like(input_values) config = HubertConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, do_stable_layer_norm=self.do_stable_layer_norm, ) return config, input_values, attention_mask def create_and_check_model(self, config, input_values, attention_mask): model = TFHubertModel(config) 
result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): config.layerdrop = 0.0 model = TFHubertModel(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask attention_mask = attention_mask * length_mask batch_outputs = model(input_values, attention_mask=attention_mask, training=False).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice, training=False).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(np.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = TFHubertForCTC(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask attention_mask = attention_mask * length_mask model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss self.parent.assertTrue(abs(labels.shape[0] * mean_loss - sum_loss) < 1e-2) def check_training(self, config, input_values, *args): model = TFHubertForCTC(config) model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask pad_size = max(max_length_labels) - labels.shape[1] labels = tf.pad(labels, ((0, 0), (0, pad_size)), constant_values=-100) loss = model(input_values, labels=labels, training=True).loss self.parent.assertFalse(tf.math.is_inf(loss)) def check_labels_out_of_vocab(self, config, input_values, *args): model = TFHubertForCTC(config) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_tf class TFHubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFHubertModel, TFHubertForCTC) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFHubertModel} if is_tf_available() else 
{} test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFHubertModelTester(self) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no input embeddings") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-base-ls960") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): pass @unittest.skip(reason="Fix me! 
Hubert hits OOM errors when loss is computed on full batch") def test_keras_fit(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFHubertRobustModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFHubertModel, TFHubertForCTC) if is_tf_available() else () test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFHubertModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True, scope="robust", ) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, 
model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no input embeddings or get_input_embeddings method") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): pass @unittest.skip(reason="Fix me! 
Hubert hits OOM errors when loss is computed on full batch") def test_keras_fit(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFHubertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual( tf.reduce_sum(mask, -1).numpy().tolist(), [mask_prob * sequence_length for _ in range(batch_size)] ) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) for batch_sum in tf.reduce_sum(mask, -1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_tf @slow @require_soundfile class TFHubertModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_normal(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="tf", sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", 
do_lower_case=True) input_speech = self._load_datasamples(2) input_values = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000) input_values = inputs.input_values attention_mask = inputs.attention_mask logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant of panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
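# The `_compute_mask_indices` tests above capture the key property of
# SpecAugment-style span masking: with mask_length == 1 every row masks
# exactly mask_prob * sequence_length positions, while longer spans may
# overlap, so the per-row total is only bounded from above. A hedged numpy
# sketch of that overlap effect (illustrative only; the library function adds
# more logic, e.g. minimum-mask counts and boundary handling):
import numpy as np

def span_mask_sketch(sequence_length=80, mask_prob=0.5, mask_length=4, seed=0):
    rng = np.random.default_rng(seed)
    num_spans = int(mask_prob * sequence_length / mask_length)
    mask = np.zeros(sequence_length, dtype=bool)
    for start in rng.integers(0, sequence_length - mask_length + 1, size=num_spans):
        mask[start : start + mask_length] = True  # overlapping spans collapse into one
    return mask

# Overlaps are only counted once, hence mask.sum() <= mask_prob * sequence_length.
assert span_mask_sketch().sum() <= 0.5 * 80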
# coding=utf-8
# Copyright 2020 The HuggingFace team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
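# Every quantization check in the I-BERT integration tests below follows the
# same symmetric rule: scaling_factor = max(|x|) / (2 ** (bit - 1) - 1), and
# the dequantized value never deviates from the original by more than one
# scaling factor. A minimal sketch of that rule (an illustrative stand-in,
# not the library's QuantAct/QuantLinear implementation):
import torch

def symmetric_quantize_sketch(x: torch.Tensor, bit: int = 8):
    scaling_factor = x.abs().max() / (2 ** (bit - 1) - 1)
    x_int = (x / scaling_factor).round()            # integer representation
    return x_int * scaling_factor, scaling_factor   # dequantized value + scale

# e.g. in the 8-bit case, |x - symmetric_quantize_sketch(x)[0]| <= scaling_factor
# elementwise, which is exactly the tolerance the tests below assert with.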
import copy import unittest from transformers import IBertConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.ibert.modeling_ibert import ( IBertEmbeddings, IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear, create_position_ids_from_input_ids, ) class IBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return IBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, quant_mode=True, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_model( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = IBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = IBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = IBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = IBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = IBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class IBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_head_masking = False test_resize_embeddings = False all_model_classes = ( ( IBertForMaskedLM, IBertModel, IBertForSequenceClassification, IBertForTokenClassification, IBertForMultipleChoice, IBertForQuestionAnswering, ) if is_torch_available() else () ) 
pipeline_model_mapping = ( { "feature-extraction": IBertModel, "fill-mask": IBertForMaskedLM, "question-answering": IBertForQuestionAnswering, "text-classification": IBertForSequenceClassification, "token-classification": IBertForTokenClassification, "zero-shot": IBertForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = IBertModelTester(self) self.config_tester = ConfigTester(self, config_class=IBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in IBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = IBertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_create_position_ids_respects_padding_index(self): config = self.model_tester.prepare_config_and_inputs()[0] model = IBertEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): config = self.model_tester.prepare_config_and_inputs()[0] embeddings = IBertEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), QuantEmbedding) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_feed_forward_chunking(self): pass def test_inputs_embeds(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: embed, embed_scaling_factor = wte(input_ids) inputs["inputs_embeds"] = embed else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch class IBertModelIntegrationTest(unittest.TestCase): def test_quant_embedding(self): weight_bit = 8 embedding = QuantEmbedding(2, 4, quant_mode=True, weight_bit=weight_bit) embedding_weight = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) embedding.weight = nn.Parameter(embedding_weight) expected_scaling_factor = embedding_weight.abs().max() / (2 ** (weight_bit - 1) - 1) x, x_scaling_factor = embedding(torch.tensor(0)) y, y_scaling_factor = embedding(torch.tensor(1)) self.assertTrue(torch.allclose(x_scaling_factor, expected_scaling_factor, atol=1e-4)) self.assertTrue(torch.allclose(x_scaling_factor, expected_scaling_factor, atol=1e-4)) self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4)) self.assertTrue(torch.allclose(x, embedding_weight[0], atol=expected_scaling_factor)) self.assertTrue(torch.allclose(y, embedding_weight[1], atol=expected_scaling_factor)) def test_quant_act(self): def _test_range(): act = QuantAct(activation_bit, act_range_momentum, quant_mode=True) x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) x_scaling_factor = torch.tensor(1.0) y, y_scaling_factor = act(x, x_scaling_factor) y_int = y / y_scaling_factor expected_x_min, expected_x_max = x.min(), x.max() self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4)) self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4)) expected_range = torch.max(expected_x_min.abs(), expected_x_max.abs()) expected_scaling_factor = expected_range / (2 ** (activation_bit - 1) - 1) self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4)) self.assertTrue(torch.allclose(x, y, atol=expected_scaling_factor)) self.assertTrue(torch.allclose(y_int, y_int.round(), atol=1e-4)) x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) * 2 x_scaling_factor = torch.tensor(1.0) y, y_scaling_factor = act(x, x_scaling_factor) y_int = y / y_scaling_factor expected_x_min = expected_x_min * act_range_momentum + x.min() * (1 - act_range_momentum) expected_x_max = expected_x_max * act_range_momentum + x.max() * (1 - act_range_momentum) self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4)) self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4)) expected_range = torch.max(expected_x_min.abs(), expected_x_max.abs()) expected_scaling_factor = expected_range / (2 ** (activation_bit - 1) - 1) self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4)) x = x.clamp(min=-expected_range, max=expected_range) self.assertTrue(torch.allclose(x, y, atol=expected_scaling_factor)) self.assertTrue(torch.allclose(y_int, y_int.round(), atol=1e-4)) act.eval() x = torch.tensor([[-1.0, -2.0, -3.0, 
-4.0], [5.0, 6.0, 7.0, 8.0]]) * 3 self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4)) self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4)) self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4)) def _test_identity(): act = QuantAct(activation_bit, act_range_momentum, quant_mode=True) x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) y = torch.tensor([[6.0, -7.0, 1.0, -2.0], [3.0, -4.0, -8.0, 5.0]]) x_scaling_factor = torch.tensor(1.0) y_scaling_factor = torch.tensor(0.5) z, z_scaling_factor = act(x, x_scaling_factor, y, y_scaling_factor) z_int = z / z_scaling_factor self.assertTrue(torch.allclose(x + y, z, atol=0.1)) self.assertTrue(torch.allclose(z_int, z_int.round(), atol=1e-4)) activation_bit = 8 act_range_momentum = 0.95 _test_range() _test_identity() def test_quant_linear(self): def _test(per_channel): linear_q = QuantLinear(2, 4, quant_mode=True, per_channel=per_channel, weight_bit=weight_bit) linear_dq = QuantLinear(2, 4, quant_mode=False, per_channel=per_channel, weight_bit=weight_bit) linear_weight = torch.tensor([[-1.0, 2.0, 3.0, -4.0], [5.0, -6.0, -7.0, 8.0]]).T linear_q.weight = nn.Parameter(linear_weight) linear_dq.weight = nn.Parameter(linear_weight) q, q_scaling_factor = linear_q(x, x_scaling_factor) q_int = q / q_scaling_factor dq, dq_scaling_factor = linear_dq(x, x_scaling_factor) if per_channel: q_max = linear_weight.abs().max(dim=1).values else: q_max = linear_weight.abs().max() expected_scaling_factor = q_max / (2 ** (weight_bit - 1) - 1) self.assertTrue(torch.allclose(linear_q.fc_scaling_factor, expected_scaling_factor, atol=1e-4)) self.assertTrue(torch.allclose(q, dq, atol=0.5)) self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4)) weight_bit = 8 x = torch.tensor([[2.0, -5.0], [-3.0, 4.0]]) x_scaling_factor = torch.tensor([1.0]) _test(True) _test(False) def test_int_gelu(self): gelu_q = IntGELU(quant_mode=True) gelu_dq = nn.GELU() x_int = torch.arange(-10000, 10001, 1) x_scaling_factor = torch.tensor(0.001) x = x_int * x_scaling_factor q, q_scaling_factor = gelu_q(x, x_scaling_factor) q_int = q / q_scaling_factor dq = gelu_dq(x) self.assertTrue(torch.allclose(q, dq, atol=0.5)) self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4)) def test_force_dequant_gelu(self): x_int = torch.arange(-10000, 10001, 1) x_scaling_factor = torch.tensor(0.001) x = x_int * x_scaling_factor gelu_dq = IntGELU(quant_mode=False) gelu_fdqs_dict = { True: [ IntGELU(quant_mode=True, force_dequant="nonlinear"), IntGELU(quant_mode=True, force_dequant="gelu"), ], False: [ IntGELU(quant_mode=True, force_dequant="none"), IntGELU(quant_mode=True, force_dequant="softmax"), IntGELU(quant_mode=True, force_dequant="layernorm"), ], } dq, dq_scaling_factor = gelu_dq(x, x_scaling_factor) for label, gelu_fdqs in gelu_fdqs_dict.items(): for gelu_fdq in gelu_fdqs: q, q_scaling_factor = gelu_fdq(x, x_scaling_factor) if label: self.assertTrue(torch.allclose(q, dq, atol=1e-4)) else: self.assertFalse(torch.allclose(q, dq, atol=1e-4)) def test_int_softmax(self): output_bit = 8 softmax_q = IntSoftmax(output_bit, quant_mode=True) softmax_dq = nn.Softmax() def _test(array): x_int = torch.tensor(array) x_scaling_factor = torch.tensor(0.1) x = x_int * x_scaling_factor q, q_scaling_factor = softmax_q(x, x_scaling_factor) q_int = q / q_scaling_factor dq = softmax_dq(x) self.assertTrue(torch.allclose(q, dq, atol=0.5)) self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4)) self.assertTrue(q.abs().max() < 
2**output_bit) array = [[i + j for j in range(10)] for i in range(-10, 10)] _test(array) array = [[i + j for j in range(50)] for i in range(-10, 10)] _test(array) array = [[i + 100 * j for j in range(2)] for i in range(-10, 10)] _test(array) def test_force_dequant_softmax(self): output_bit = 8 array = [[i + j for j in range(10)] for i in range(-10, 10)] x_int = torch.tensor(array) x_scaling_factor = torch.tensor(0.1) x = x_int * x_scaling_factor softmax_dq = IntSoftmax(output_bit, quant_mode=False) softmax_fdqs_dict = { True: [ IntSoftmax(output_bit, quant_mode=True, force_dequant="nonlinear"), IntSoftmax(output_bit, quant_mode=True, force_dequant="softmax"), ], False: [ IntSoftmax(output_bit, quant_mode=True, force_dequant="none"), IntSoftmax(output_bit, quant_mode=True, force_dequant="gelu"), IntSoftmax(output_bit, quant_mode=True, force_dequant="layernorm"), ], } dq, dq_scaling_factor = softmax_dq(x, x_scaling_factor) for label, softmax_fdqs in softmax_fdqs_dict.items(): for softmax_fdq in softmax_fdqs: q, q_scaling_factor = softmax_fdq(x, x_scaling_factor) if label: self.assertTrue(torch.allclose(q, dq, atol=1e-4)) else: self.assertFalse(torch.allclose(q, dq, atol=1e-4)) def test_int_layernorm(self): output_bit = 8 array = [[[i * j * j + j for j in range(5, 15)]] for i in range(-10, 10)] x_int = torch.tensor(array) x_scaling_factor = torch.tensor(0.1) x = x_int * x_scaling_factor ln_q = IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit) ln_dq = nn.LayerNorm(x.shape[1:], 1e-5) ln_q.weight = nn.Parameter(torch.ones(x.shape[1:])) ln_q.bias = nn.Parameter(torch.ones(x.shape[1:])) ln_dq.weight = nn.Parameter(torch.ones(x.shape[1:])) ln_dq.bias = nn.Parameter(torch.ones(x.shape[1:])) q, q_scaling_factor = ln_q(x, x_scaling_factor) q_int = q / q_scaling_factor dq = ln_dq(x) self.assertTrue(torch.allclose(q, dq, atol=0.5)) self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4)) def test_force_dequant_layernorm(self): output_bit = 8 array = [[[i * j * j + j for j in range(5, 15)]] for i in range(-10, 10)] x_int = torch.tensor(array) x_scaling_factor = torch.tensor(0.1) x = x_int * x_scaling_factor ln_dq = IntLayerNorm(x.shape[1:], 1e-5, quant_mode=False, output_bit=output_bit) ln_fdqs_dict = { True: [ IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="nonlinear"), IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="layernorm"), ], False: [ IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="none"), IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="gelu"), IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="softmax"), ], } ln_dq.weight = nn.Parameter(torch.ones(x.shape[1:])) ln_dq.bias = nn.Parameter(torch.ones(x.shape[1:])) dq, dq_scaling_factor = ln_dq(x, x_scaling_factor) for label, ln_fdqs in ln_fdqs_dict.items(): for ln_fdq in ln_fdqs: ln_fdq.weight = nn.Parameter(torch.ones(x.shape[1:])) ln_fdq.bias = nn.Parameter(torch.ones(x.shape[1:])) q, q_scaling_factor = ln_fdq(x, x_scaling_factor) if label: self.assertTrue(torch.allclose(q, dq, atol=1e-4)) else: self.assertFalse(torch.allclose(q, dq, atol=1e-4)) def quantize(self, model): if hasattr(model, "quant_mode"): model.quant_mode = True elif type(model) == nn.Sequential: for n, m in model.named_children(): self.quantize(m) elif type(model) == nn.ModuleList: for n in model: self.quantize(n) else: for attr in dir(model): mod = 
getattr(model, attr) if isinstance(mod, nn.Module) and mod != model: self.quantize(mod) @slow def test_inference_masked_lm(self): model = IBertForMaskedLM.from_pretrained("kssteven/ibert-roberta-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 50265)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) self.quantize(model) output = model(input_ids)[0] self.assertEqual(output.shape, expected_shape) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=0.1)) @slow def test_inference_classification_head(self): model = IBertForSequenceClassification.from_pretrained("kssteven/ibert-roberta-large-mnli") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 3)) self.assertEqual(output.shape, expected_shape) expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]]) self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4)) self.quantize(model) output = model(input_ids)[0] self.assertEqual(output.shape, expected_shape) self.assertTrue(torch.allclose(output, expected_tensor, atol=0.1))
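The quantization tests above repeatedly verify one invariant of I-BERT's symmetric linear quantization: a tensor's scaling factor is max|w| / (2^(bit-1) - 1), and dequantized values differ from the originals by at most one scaling factor. Below is a minimal standalone sketch of that arithmetic, assuming only torch; the helper name is ours and is independent of the QuantLinear/QuantEmbedding internals.

import torch

def symmetric_quantize(w: torch.Tensor, weight_bit: int = 8):
    # Largest representable magnitude for a signed `weight_bit`-bit integer.
    q_max = 2 ** (weight_bit - 1) - 1
    scaling_factor = w.abs().max() / q_max
    w_int = (w / scaling_factor).round().clamp(-q_max, q_max)
    return w_int, scaling_factor

w = torch.tensor([[-1.0, 2.0, 3.0, -4.0], [5.0, -6.0, -7.0, 8.0]])
w_int, scaling_factor = symmetric_quantize(w)
# Integer-valued codes, as the tests assert via allclose(q_int, q_int.round()).
assert torch.allclose(w_int, w_int.round())
# Dequantization error is bounded by one quantization step, as in test_quant_linear.
assert torch.allclose(w, w_int * scaling_factor, atol=scaling_factor.item())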
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Notes on the tests below: get_expected_values computes the expected height and
# width when providing images to IdeficsImageProcessor, assuming do_resize is set
# to True with a scalar size and size_divisor. As the torchvision transforms had to
# be reimplemented using transformers utils, the equivalency test checks that both
# do the same. A plain image.convert("RGB") would only work for .jpg images, as it
# creates a wrong background for transparent images; the call to alpha_composite
# handles this case.
import unittest from transformers.testing_utils import require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_torchvision_available(): import torchvision.transforms as transforms if is_vision_available(): from PIL import Image from transformers import IdeficsImageProcessor class IdeficsImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], ): size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "image_size": self.image_size, } def get_expected_values(self, image_inputs, batched=False): if not batched: size = self.image_size image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class IdeficsImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = IdeficsImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = IdeficsImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "image_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertNotEqual(image_processor.image_size, 30) 
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, image_size=42) self.assertEqual(image_processor.image_size, 42) @require_torchvision def test_torchvision_numpy_transforms_equivalency(self): image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) image_processor = self.image_processing_class(**self.image_processor_dict) def convert_to_rgb(image): if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite image_size = image_processor.image_size image_mean = image_processor.image_mean image_std = image_processor.image_std transform = transforms.Compose( [ convert_to_rgb, transforms.Resize((image_size, image_size), interpolation=transforms.InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=image_mean, std=image_std), ] ) pixel_values_transform_implied = image_processor(image_inputs, transform=None) pixel_values_transform_supplied = image_processor(image_inputs, transform=transform) torch.testing.assert_close(pixel_values_transform_implied, pixel_values_transform_supplied, rtol=0.0, atol=0.0) @unittest.skip("not supported") def test_call_numpy(self): pass @unittest.skip("not supported") def test_call_numpy_4_channels(self): pass @unittest.skip("not supported") def test_call_pil(self): pass @unittest.skip("not supported") def test_call_pytorch(self): pass
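A side note on the convert_to_rgb helper in test_torchvision_numpy_transforms_equivalency: a bare Image.convert("RGB") simply drops the alpha channel, so a transparent PNG keeps its hidden foreground colours, while compositing onto an opaque background first gives the intended result. A small PIL-only illustration; the pixel values here are our own toy example.

from PIL import Image

# A 2x2 fully transparent "red" image: alpha is 0 everywhere.
rgba = Image.new("RGBA", (2, 2), (255, 0, 0, 0))

# Naive conversion ignores transparency and exposes the underlying red.
naive = rgba.convert("RGB")

# Compositing onto white first, as the test's convert_to_rgb does.
background = Image.new("RGBA", rgba.size, (255, 255, 255, 255))
composited = Image.alpha_composite(background, rgba).convert("RGB")

print(naive.getpixel((0, 0)))       # (255, 0, 0): transparency ignored
print(composited.getpixel((0, 0)))  # (255, 255, 255): white background shows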
# coding=utf-8
# Copyright 2022 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Notes on the tests below: prepare_prompts builds a list of prompts mixing text
# and PIL images (text and one image, text and several images, only text, and
# only images). test_processor checks that all prompts succeeded. For now the
# processor supports only pixel_values, input_ids and attention_mask.
import numpy as np from transformers.testing_utils import TestCasePlus, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, IdeficsImageProcessor, IdeficsProcessor, LlamaTokenizerFast, PreTrainedTokenizerFast, ) @require_torch @require_vision class IdeficsProcessorTest(TestCasePlus): def setUp(self): super().setUp() self.checkpoint_path = self.get_auto_remove_tmp_dir() image_processor = IdeficsImageProcessor() tokenizer = LlamaTokenizerFast.from_pretrained("HuggingFaceM4/tiny-random-idefics") processor = IdeficsProcessor(image_processor, tokenizer) processor.save_pretrained(self.checkpoint_path) self.input_keys = ["pixel_values", "input_ids", "attention_mask", "image_attention_mask"] def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.checkpoint_path, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.checkpoint_path, **kwargs).image_processor def prepare_prompts(self): num_images = 2 images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)] images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images] prompts = [ [ "User:", images[0], "Describe this image.\nAssistant:", ], [ "User:", images[0], "Describe this image.\nAssistant: An image of two dogs.\n", "User:", images[1], "Describe this image.\nAssistant:", ], [ "User:", "Describe this image.\nAssistant: An image of two kittens.\n", "User:", "Describe this image.\nAssistant:", ], [ images[0], images[1], ], ] return prompts def test_save_load_pretrained_additional_features(self): processor = IdeficsProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.checkpoint_path) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = IdeficsProcessor.from_pretrained( self.checkpoint_path, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, IdeficsImageProcessor) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) prompts = self.prepare_prompts() input_processor = processor(prompts, return_tensors="pt") for key in self.input_keys: assert torch.is_tensor(input_processor[key]) def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_tokenizer_padding(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer(padding_side="right") processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) 
predicted_tokens = [ "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk>", "<s> Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk>", ] prompts = [[prompt] for prompt in self.prepare_prompts()[2]] max_length = processor(prompts, padding="max_length", truncation=True, max_length=20) longest = processor(prompts, padding="longest", truncation=True, max_length=30) decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1]) decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1]) self.assertEqual(decoded_max_length, predicted_tokens[1]) self.assertEqual(decoded_longest, predicted_tokens[0]) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor) prompts = self.prepare_prompts() inputs = processor(prompts) self.assertSetEqual(set(inputs.keys()), set(self.input_keys))
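test_tokenizer_padding encodes the difference between the two padding strategies in its expected strings: padding="max_length" always pads to max_length tokens (ten <unk>s here), while padding="longest" only pads to the longest prompt in the batch (nine <unk>s). The same behaviour can be observed on the tokenizer alone; a hedged sketch using the same checkpoint as the tests, where setting pad_token to <unk> is our assumption to mirror the padding seen in the expected output.

from transformers import LlamaTokenizerFast

tok = LlamaTokenizerFast.from_pretrained("HuggingFaceM4/tiny-random-idefics")
tok.pad_token = tok.unk_token  # assumption: pad with <unk>, as in the test output

batch = ["Describe this image.", "Describe this image. An image of two kittens."]
fixed = tok(batch, padding="max_length", truncation=True, max_length=20)
longest = tok(batch, padding="longest", truncation=True, max_length=30)

print([len(ids) for ids in fixed["input_ids"]])    # every row padded to 20
print([len(ids) for ids in longest["input_ids"]])  # rows padded to the longest row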
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Notes on the tests below: only 2 clusters are created, for the sake of
# simplicity. test_call_pil, test_call_numpy and test_call_pytorch override the
# versions from ImageProcessingTestMixin because the ImageGPT model takes
# input_ids (not pixel_values) as input; each of them covers non-batched and
# batched inputs.
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize def prepare_image_processor_dict(self): return { "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } def expected_output_image_shape(self, images): return (self.size["height"] * self.size["width"],) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ImageGPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ImageGPTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ImageGPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "clusters")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_image_processor_to_json_string(self): image_processor = self.image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: self.assertEqual(obj[key], value) def test_image_processor_to_json_file(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "image_processor.json") image_processor_first.to_json_file(json_file_path) image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict() 
image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) def test_image_processor_from_and_save_pretrained(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(tmpdirname) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict() image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) @unittest.skip("ImageGPT requires clusters at initialization") def test_init_without_params(self): pass def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) @unittest.skip("ImageGPT assumes clusters for 3 channels") def test_call_numpy_4_channels(self): pass def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) def prepare_images(): dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test") image1 = Image.open(dataset[4]["file"]) image2 = Image.open(dataset[5]["file"]) images = [image1, image2] return images @require_vision @require_torch class 
ImageGPTImageProcessorIntegrationTest(unittest.TestCase): @slow def test_image(self): image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") images = prepare_images() encoding = image_processing(images[0], return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (1, 1024)) expected_slice = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice) encoding = image_processing(images, return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (2, 1024)) expected_slice = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
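Why the encodings above are input_ids of shape (batch, height * width) rather than pixel values: ImageGPT's image processor normalizes each pixel to [-1, 1] and replaces it with the id of its nearest colour cluster, so an image becomes a token sequence. Below is a standalone sketch of that nearest-cluster step, reusing the two toy clusters from ImageGPTImageProcessingTester; the real processor's resizing and normalization details are simplified away here.

import numpy as np

clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)  # (n_clusters, 3), the same toy clusters as in the tester above

# A fake 18x18 RGB image already normalized to [-1, 1], flattened to pixels.
pixels = np.random.rand(18 * 18, 3) * 2 - 1

# Squared Euclidean distance from every pixel to every cluster centre.
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
input_ids = distances.argmin(axis=1)

assert input_ids.shape == (18 * 18,)        # one token per pixel
assert input_ids.max() < clusters.shape[0]  # ids index into the clusters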
# coding=utf-8
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Notes on the tests below: ImageGPTForCausalImageModeling doesn't have tied input
# and output embeddings, which is why the _check_scores method of
# GenerationTesterMixin is overwritten and why test_resize_tokens_embeddings and
# test_resize_embeddings_untied are specialised here (resize, check the embedding
# matrix and any bias, clamp input ids to the new vocabulary size, and verify a
# forward pass still succeeds). As ImageGPTForImageClassification isn't included
# in any auto mapping, labels are added by hand in _prepare_for_class. In
# test_forward_signature, signature.parameters is an OrderedDict, so the argument
# order is deterministic. _create_and_check_torchscript uses _config_zero_init to
# be sure there are no NaNs. The integration test verifies the logits on an image
# of cute cats.
import copy import inspect import os import tempfile import unittest from transformers import ImageGPTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ) if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None def get_large_model_config(self): return ImageGPTConfig.from_pretrained("imagegpt") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): pixel_values = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, pixel_values, input_mask, head_mask, 
token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return ImageGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 513 config.max_position_embeddings = 1024 return config def prepare_config_and_inputs_for_decoder(self): ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_imagegpt_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, token_type_ids=token_type_ids, head_mask=head_mask) result = model(pixel_values, token_type_ids=token_type_ids) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTForCausalImageModeling(config) model.to(torch_device) model.eval() labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) result = model(pixel_values, token_type_ids=token_type_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1)) def create_and_check_imagegpt_for_image_classification( self, config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = ImageGPTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (ImageGPTForCausalImageModeling, 
ImageGPTForImageClassification, ImageGPTModel) if is_torch_available() else () ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} if is_torch_available() else {} ) test_missing_keys = False input_name = "pixel_values" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ImageGPTForImageClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size - 1) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) def test_imagegpt_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ImageGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() model_embed =
model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) if model.get_output_embeddings() is None: continue model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pixel_values = inputs["pixel_values"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(pixel_values) with torch.no_grad(): model(**inputs)[0] def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: pixel_values = inputs["pixel_values"] traced_model = torch.jit.trace(model, pixel_values) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for 
key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ImageGPTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") if is_vision_available() else None @slow def test_inference_causal_lm_head(self): model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1024, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
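# The integration test above verifies logits from "openai/imagegpt-small"; the sketch
# below is a hedged, illustrative example (not part of the test suite) of using the
# same checkpoint for unconditional generation. It assumes network access to the Hub
# and ImageGPT's convention that the start-of-sequence token id is vocab_size - 1.
if __name__ == "__main__":
    import torch
    from transformers import ImageGPTForCausalImageModeling

    model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
    model.eval()
    # Start from the SOS cluster id and sample a short sequence of colour-cluster ids.
    context = torch.full((1, 1), model.config.vocab_size - 1, dtype=torch.long)
    generated = model.generate(input_ids=context, max_length=17, do_sample=True, top_k=40)
    print(generated.shape)  # torch.Size([1, 17]): SOS followed by 16 sampled cluster ids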
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Informer model. """
import inspect import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import InformerConfig, InformerForPrediction, InformerModel from transformers.models.informer.modeling_informer import InformerDecoder, InformerEncoder @require_torch class InformerModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], sampling_factor=10, distil=False, ): self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = min( sampling_factor * np.ceil(np.log1p(context_length)).astype("int").item(), context_length ) self.decoder_seq_length = min( sampling_factor * np.ceil(np.log1p(prediction_length)).astype("int").item(), prediction_length ) self.sampling_factor = sampling_factor self.distil = distil def get_config(self): return InformerConfig( prediction_length=self.prediction_length, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, context_length=self.context_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, num_static_real_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], sampling_factor=self.sampling_factor, distil=self.distil, ) def prepare_informer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) static_real_features = floats_tensor([self.batch_size, 1]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "static_real_features": static_real_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_informer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = InformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = InformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, _, _, _ = model.create_network_inputs(**inputs_dict) enc_input = transformer_inputs[:, : config.context_length, ...] dec_input = transformer_inputs[:, config.context_length :, ...] encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = InformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class InformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (InformerModel, InformerForPrediction) if is_torch_available() else () all_generative_model_classes = (InformerForPrediction,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": InformerModel} if is_torch_available() else {} is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False test_model_common_attributes = False def setUp(self): self.model_tester = InformerModelTester(self) self.config_tester = ConfigTester( self, config_class=InformerConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = 
getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.context_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "prediction_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_resize_tokens_embeddings(self): pass def test_model_outputs_equivalence(self): pass def test_determinism(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_main_input_name(self): model_signature = inspect.signature(getattr(InformerModel, "forward")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(InformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] expected_arg_names.extend( [ "future_observed_mask", "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] if "future_observed_mask" in arg_names else [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True
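        # Note: the expected shapes below come from the tester's derived attributes
        # (encoder_seq_length / decoder_seq_length), which follow Informer's ProbSparse
        # sampling rule min(sampling_factor * ceil(log1p(length)), length), rather than
        # from a raw seq_length.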
seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) context_length = getattr(self.model_tester, "context_length", seq_len) prediction_length = getattr(self.model_tester, "prediction_length", seq_len) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, context_length], ) out_len = len(outputs) correct_outlen = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, prediction_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_seq_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, context_length], ) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() def prepare_batch(filename="train-batch.pt"): file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset") batch = torch.load(file, map_location=torch_device) return batch @require_torch @slow class InformerModelIntegrationTests(unittest.TestCase): def test_inference_no_head(self): model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch() torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], 
past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], ).last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.4699, 0.7295, 0.8967], [0.4858, 0.3810, 0.9641], [-0.0233, 0.3608, 1.0303]], device=torch_device, ) self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_time_features=batch["future_time_features"], ).encoder_last_hidden_state expected_shape = torch.Size((64, model.config.context_length // 8, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.4170, 0.9067, 0.8153], [0.3004, 0.7574, 0.7066], [0.6803, -0.6323, 1.2802]], device=torch_device ) self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") torch.manual_seed(0) with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([3400.8005, 4289.2637, 7101.9209], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
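# Hedged usage sketch, not part of the test suite: it mirrors test_seq_to_seq_generation
# above and assumes network access to the "huggingface/informer-tourism-monthly"
# checkpoint and the hf-internal-testing batch dataset fetched by prepare_batch().
if __name__ == "__main__":
    model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        samples = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            past_observed_mask=batch["past_observed_mask"],
            future_time_features=batch["future_time_features"],
        ).sequences
    # sequences has shape (batch, num_parallel_samples, prediction_length); averaging
    # over the sample dimension yields a point forecast per series.
    print(samples.mean(dim=1).shape)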
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch InstructBLIP model. """
import inspect import tempfile import unittest import numpy as np import requests from transformers import ( CONFIG_MAPPING, InstructBlipConfig, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from transformers.testing_utils import ( require_accelerate, require_bitsandbytes, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from torch import nn from transformers import InstructBlipForConditionalGeneration, InstructBlipVisionModel from transformers.models.instructblip.modeling_instructblip import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class InstructBlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return InstructBlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = InstructBlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class InstructBlipVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (InstructBlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = 
InstructBlipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=InstructBlipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training(self): pass @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = InstructBlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class InstructBlipQFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, 
self.seq_length], self.vocab_size) qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask def get_config(self): return InstructBlipQFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) class InstructBlipTextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = self.eos_token_id attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) class InstructBlipForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = 
{} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = InstructBlipVisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = InstructBlipQFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = InstructBlipTextModelDecoderOnlyTester(parent, **text_kwargs) self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values def get_config(self): return InstructBlipConfig.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values ): model = InstructBlipForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values, input_ids=input_ids, attention_mask=attention_mask, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, ) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "qformer_input_ids": qformer_input_ids, "qformer_attention_mask": qformer_attention_mask, "labels": input_ids, } return config, inputs_dict @require_torch class InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (InstructBlipForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = InstructBlipForConditionalGenerationDecoderOnlyModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="InstructBlipForConditionalGeneration doesn't support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Tied weights are tested in individual model tests") def test_tied_weights_keys(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="InstructBlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base 
InstructBlipModel") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base InstructBlipModel") def test_save_load_fast_init_to_base(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = InstructBlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = InstructBlipQFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST: model = InstructBlipForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch @slow class InstructBlipModelIntegrationTest(unittest.TestCase): @require_bitsandbytes @require_accelerate def test_inference_vicuna_7b(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-vicuna-7b", load_in_8bit=True, low_cpu_mem_usage=True ) url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" 
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, torch.float16) with torch.no_grad(): logits = model(**inputs).logits expected_slice = torch.tensor( [[-3.4902, -12.5078, 8.4141], [-5.1211, -12.1328, 7.8281], [-4.0312, -13.5938, 9.1172]], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3].float(), expected_slice, atol=1e-3)) outputs = model.generate(**inputs, max_new_tokens=30) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() expected_outputs = [2, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889] self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, "The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV while driving down a busy city street.", ) def test_inference_flant5_xl(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-flan-t5-xl", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, ).to(torch_device) url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device) for k, v in inputs.items(): if torch.is_floating_point(v): inputs[k] = v.to(torch.bfloat16) outputs = model.generate( **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, ) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0] expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 2756, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 12, 743, 112, 2491, 5, 37, 1023, 19, 7225, 788, 12, 8, 685, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 94, 19, 487, 24, 8, 388, 19, 1119, 12, 1097, 540, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 6, 68, 34, 19, 92, 487, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 3, 13865, 13, 8, 1053, 21, 8, 388, 31, 7, 2874, 6, 34, 19, 964, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, "The image depicts a man ironing clothes on the back of a yellow van in the middle of a busy city street. The man is wearing a yellow shirt with a bright yellow tie, and he is using an ironing board to complete his task. The image is unusual due to the fact that it shows a man ironing clothes on the back of a van in the middle of a busy city street. It is possible that the man is trying to save money by doing his laundry on the back of the van, but it is also possible that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street. 
Regardless of the reason for the man's actions, it is clear that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street.", )
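The integration tests above reduce to a simple inference pattern; the following is a minimal sketch of it, not part of the test suite. The checkpoint name, image URL, and prompt are taken directly from the tests above; running it assumes network access and enough memory for the checkpoint.

import requests
from PIL import Image
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-flan-t5-xl")

url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# A single processor call builds the language-model inputs, the Q-Former
# inputs (qformer_input_ids / qformer_attention_mask), and pixel_values;
# generate() consumes them all together.
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(outputs, skip_special_tokens=True)[0].strip())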
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class InstructBlipProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = BlipImageProcessor() tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert") processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def get_qformer_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = InstructBlipProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = InstructBlipProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, BlipImageProcessor) self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor = InstructBlipProcessor( tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer ) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor = InstructBlipProcessor( tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer ) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tokens = tokenizer(input_str, return_token_type_ids=False) encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False) for key in 
encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key], encoded_processor[key]) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor = InstructBlipProcessor( tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer ) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], ) with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor = InstructBlipProcessor( tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer ) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor = InstructBlipProcessor( tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer ) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
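A minimal sketch of the processor contract asserted above: one call yields tokenizer features, "qformer_"-prefixed Q-Former tokenizer features, and image features. The image construction mirrors prepare_image_inputs; the local path is hypothetical (any directory produced by InstructBlipProcessor.save_pretrained works).

import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("./instructblip-processor")  # hypothetical local path
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))

inputs = processor(text="lower newer", images=image)
# Key order asserted by test_processor / test_model_input_names above:
print(list(inputs.keys()))
# -> ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"]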
# coding=utf-8
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch KOSMOS-2 model. """
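Before the test code, a minimal sketch of the generation pattern its run_example helper wraps. The checkpoint name, prompt, and image URL are taken from the integration tests; nothing here adds API surface beyond what those tests exercise.

import requests
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png"
image = Image.open(requests.get(url, stream=True).raw)

# The "<grounding>" prefix asks the model to ground generated phrases to
# image regions.
inputs = processor(text="<grounding>An image of", images=image, return_tensors="pt")
generated_ids = model.generate(
    pixel_values=inputs["pixel_values"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    image_embeds_position_mask=inputs["image_embeds_position_mask"],
    max_new_tokens=128,
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
# post_process_generation strips the grounding markup and extracts
# (entity, char_span, bounding_boxes) tuples.
caption, entities = processor.post_process_generation(generated_text)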
import copy import inspect import os import tempfile import unittest import numpy as np import requests from transformers import AutoModelForVision2Seq, AutoProcessor, Kosmos2Config from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2TextConfig, Kosmos2VisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Kosmos2ForConditionalGeneration, Kosmos2Model from transformers.models.kosmos2.modeling_kosmos2 import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class Kosmos2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=4, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Kosmos2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict class Kosmos2TextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if 
self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Kosmos2TextConfig( vocab_size=self.vocab_size, embed_dim=self.hidden_size, layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict class Kosmos2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, latent_query_num=3, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Kosmos2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Kosmos2VisionModelTester(parent, **vision_kwargs) self.latent_query_num = latent_query_num self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() image_embeds_position_mask = torch.zeros_like(input_ids) image_embeds_position_mask[:, 1 : 1 + self.latent_query_num :] = 1 config = self.get_config() return config, input_ids, attention_mask, image_embeds_position_mask, pixel_values def get_config(self): return Kosmos2Config( self.text_model_tester.get_config().to_dict(), self.vision_model_tester.get_config().to_dict(), latent_query_num=self.latent_query_num, ) def create_and_check_model(self, config, input_ids, attention_mask, image_embeds_position_mask, pixel_values): model = Kosmos2Model(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, image_embeds_position_mask, attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size), ) self.parent.assertEqual( result.image_embeds.shape, (self.text_model_tester.batch_size, self.latent_query_num, self.text_model_tester.hidden_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, image_embeds_position_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "image_embeds_position_mask": image_embeds_position_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Kosmos2Model, "image-to-text": Kosmos2ForConditionalGeneration} if is_torch_available() else {} ) fx_compatible = False 
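# TODO (restored from the source comments): the image-to-text pipeline for
# this model needs a Processor; is_pipeline_test_to_skip below skips it.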
test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name == "ImageToTextPipelineTests" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class.__name__ == "Kosmos2ForConditionalGeneration": inputs_dict["labels"] = torch.zeros( (self.model_tester.text_model_tester.batch_size, self.model_tester.text_model_tester.seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = Kosmos2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Kosmos2Config, hidden_size=37) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "image_to_text_projection.latent_query": continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.text_config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) self.assertEqual(infos["missing_keys"], []) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.text_model_tester.num_hidden_layers + 1, ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.text_model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.text_model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True 
check_hidden_states_output(inputs_dict, config, model_class) def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) model_tied.resize_token_embeddings(config.text_config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) @slow def test_model_from_pretrained(self): for model_name in KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Kosmos2Model.from_pretrained(model_name) self.assertIsNotNone(model) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) traced_model = torch.jit.trace( model, (main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) self.clear_torch_jit_class_registry() def prepare_img(): url = "https://huggingface.co/hf-internal-testing/Kosmos2-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class Kosmos2ModelIntegrationTest(unittest.TestCase): def run_example(self, prompt, image, model, processor): inputs = processor(text=prompt, images=image, 
return_tensors="pt", padding=True).to(torch_device) generation_outputs = model.generate( pixel_values=inputs["pixel_values"], input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], image_embeds=None, image_embeds_position_mask=inputs["image_embeds_position_mask"], use_cache=True, max_new_tokens=128, output_scores=True, return_dict_in_generate=True, ) scores = generation_outputs.scores generated_ids = generation_outputs.sequences generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) processed_text = [processor.post_process_generation(x, cleanup_and_extract=False) for x in generated_text] final_text_with_entities = [processor.post_process_generation(x) for x in generated_text] return scores, generated_ids, generated_text, processed_text, final_text_with_entities def test_snowman_image_captioning(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" image = Image.open(requests.get(url, stream=True).raw) image.save("new_image.jpg") image = Image.open("new_image.jpg") model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") prompt = "<grounding>An image of" scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, image, model, processor ) processed_text = processed_text[0] final_text, entities = final_text_with_entities[0] np.testing.assert_allclose( torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), np.array( [ [-1.5672581195831299, -5.007406711578369, 4.36448860168457], [-2.147017002105713, -4.966302871704102, 4.592559337615967], [-0.9352350831031799, -4.688288688659668, 6.240612983703613], ] ), atol=1e-5, ) np.testing.assert_allclose( torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), np.array( [ [2.9916205406188965, 2.481820583343506, 4.646594524383545], [-2.8381078243255615, -2.9687185287475586, -2.6926779747009277], [-2.8909168243408203, -3.2228589057922363, -1.7056822776794434], ] ), atol=1e-5, ) EXPECTED_IDS = [ [ 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 712, 1648, 9, 64007, 10, 43867, 64008, 64009, 64057, 64876, 64010, 5950, 597, 32, 64007, 10, 646, 64008, 64009, 64018, 64924, 64010, 4, 2 ] ] self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS) EXPECTED_PROCESSED_TEXT = ( "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> " "warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>." 
) self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT) self.assertEqual(final_text, "An image of a snowman warming himself by a fire.") EXPECTED_ENTITIES = [ ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), ] self.assertListEqual(entities, EXPECTED_ENTITIES) prompt = "<grounding>Describe this image in detail:" scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, image, model, processor ) processed_text = processed_text[0] final_text, entities = final_text_with_entities[0] np.testing.assert_allclose( torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), np.array( [ [-0.9093570113182068, -4.578373908996582, 5.96360969543457], [2.452126979827881, -4.090598106384277, 8.738677024841309], [-0.7624598741531372, -4.771658897399902, 6.576295852661133], ] ), atol=1e-5, ) np.testing.assert_allclose( torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), np.array( [ [-1.673659086227417, -2.162452220916748, -1.95430588722229], [-2.006824493408203, -2.2038745880126953, -1.24686861038208], [-3.2783470153808594, -2.814181089401245, -1.390632152557373], ] ), atol=1e-5, ) EXPECTED_IDS_LONG = [ [ 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 34645, 247, 38, 1648, 12, 3391, 55, 24, 1648, 1338, 10, 43867, 1280, 32, 64007, 10, 30879, 64008, 64009, 64018, 65020, 64010, 12, 5, 1842, 4, 71, 17, 1679, 64007, 10, 3958, 64008, 64009, 64061, 64263, 64010, 6, 64007, 15719, 64008, 64009, 64253, 64617, 64010, 6, 8, 64007, 9626, 64008, 64009, 64413, 64545, 64010, 6, 23, 64007, 10, 4363, 64008, 64009, 64623, 64885, 64010, 2255, 8, 64007, 10, 3486, 64008, 64009, 64809, 65036, 64010, 1560, 2255, 4, 24, 43867, 1684, 7, 27, 3774, 5, 10356, 9, 5, 646, 6, 8, 22, 1684, 7, 30, 10, 2007, 8, 16239, 4337, 4, 2 ] ] self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS_LONG) EXPECTED_PROCESSED_TEXT_LONG = ( "<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire" "</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat" "</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>" "<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>" "<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>" "</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed " "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " "atmosphere." ) self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT_LONG) EXPECTED_FINAL_TEXT_LONG = ( "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." 
) self.assertEqual(final_text, EXPECTED_FINAL_TEXT_LONG) EXPECTED_ENTITIES_LONG = [ ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), ] self.assertListEqual(entities, EXPECTED_ENTITIES_LONG) def test_snowman_image_captioning_batch(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" image = Image.open(requests.get(url, stream=True).raw) image.save("new_image.jpg") image = Image.open("new_image.jpg") model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) prompt = ["<grounding>Describe this image in detail:", "<grounding>An image of"] processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, [image] * len(prompt), model, processor ) all_final_text = [x[0] for x in final_text_with_entities] all_entities = [x[1] for x in final_text_with_entities] EXPECTED_PROCESSED_TEXT_0 = ( "<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire" "</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat" "</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>" "<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>" "<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>" "</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed " "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " "atmosphere." ) EXPECTED_PROCESSED_TEXT_1 = ( "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> " "warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>." ) self.assertListEqual(processed_text, [EXPECTED_PROCESSED_TEXT_0, EXPECTED_PROCESSED_TEXT_1]) EXPECTED_FINAL_TEXT_0 = ( "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." ) EXPECTED_FINAL_TEXT_1 = "An image of a snowman warming himself by a fire." 
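        # The left-padded batched run must reproduce the single-prompt expectations above.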
self.assertListEqual(all_final_text, [EXPECTED_FINAL_TEXT_0, EXPECTED_FINAL_TEXT_1]) EXPECTED_ENTITIES_0 = [ ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), ] EXPECTED_ENTITIES_1 = [ ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), ] self.assertListEqual(all_entities, [EXPECTED_ENTITIES_0, EXPECTED_ENTITIES_1]) processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, [image] * len(prompt), model, processor ) all_final_text = [x[0] for x in final_text_with_entities] all_entities = [x[1] for x in final_text_with_entities] self.assertEqual(processed_text[0], EXPECTED_PROCESSED_TEXT_0) self.assertEqual(all_final_text[0], EXPECTED_FINAL_TEXT_0) self.assertListEqual(all_entities[0], EXPECTED_ENTITIES_0)
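

# For reference, a minimal runnable sketch of the generate + post-process flow that
# `run_example` above exercises, relying on the module-level imports and assuming the
# public "microsoft/kosmos-2-patch14-224" checkpoint; guarded so it never runs under
# pytest collection.
if __name__ == "__main__":
    processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
    model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")

    image = prepare_img()  # the demo image downloaded by the helper defined above
    inputs = processor(text="<grounding>An image of", images=image, return_tensors="pt")

    generated_ids = model.generate(
        pixel_values=inputs["pixel_values"],
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        max_new_tokens=64,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    # Strips the <phrase>/<object>/<patch_index_*> markup and extracts (entity, span, boxes).
    caption, entities = processor.post_process_generation(generated_text)
    print(caption)
    print(entities)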
# coding=utf-8
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os import shutil import tempfile import unittest import numpy as np import pytest import requests from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, CLIPImageProcessor, Kosmos2Processor, PreTrainedTokenizerFast, XLMRobertaTokenizer, XLMRobertaTokenizerFast, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers @require_vision class Kosmos2ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor(use_square_size=True) slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) processor = Kosmos2Processor(image_processor, fast_tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_processor = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_processor.keys(): self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" encoded_processor = processor(text=input_str, add_eos_token=True) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() 
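        # A combined text + image call should yield all four Kosmos-2 model inputs.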
inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) inputs = processor(text=input_str) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) inputs = processor(images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values"]) @require_torch def test_full_processor(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") texts = [ "<grounding> Two puppies sit in a field of grass.", "<grounding> <phrase> Two puppies </phrase> sit in a field of grass.", "<grounding> <phrase> Two puppies </phrase> sit in a field of <phrase> grass </phrase>.", "<grounding> <phrase> Two puppies </phrase> <object> <patch_index_0079> <patch_index_1016> </delimiter_of_multi_objects/> <patch_index_0135> <patch_index_1008> </object> sit in a field of <phrase> grass </phrase>.", ] image = Image.open(requests.get(url, stream=True).raw) image_path = os.path.join(self.tmpdirname, "image.jpg") image.save(image_path) image = Image.open(image_path) bboxes = [ [None, []], [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], [[None, [(480, 1023)]]], ] batch_image = [image] * 4 batch_text = [texts[0], texts[1], texts[1], texts[2]] batch_bboxes = [ None, [[]], [(79, 1016)], [[(79, 1016), (135, 1008)], (480, 1023)], ] expected_input_ids = [ [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], ] EXPECTED_PIXEL_VALUES_1 = np.array( [ [ [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], ], [ [-0.20629698038101196, -0.19128920137882233, 
-0.19128920137882233], [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], ], [ [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], ], ] ) EXPECTED_PIXEL_VALUES_2 = np.array( [ [ [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], [-0.5221993923187256, -0.5076009631156921, -0.7411757707595825], ], [ [-0.2813358008861542, -0.2963435649871826, -0.431413471698761], [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], ], [ [-0.5701355338096619, -0.641235888004303, -0.7549964189529419], [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], ], ] ) def check(texts, bboxes, expected_input_ids): outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) self.assertListEqual(outputs.input_ids, expected_input_ids) check(texts[0], bboxes[0][0], expected_input_ids[0]) check(texts[0], bboxes[0][1], expected_input_ids[0]) check(texts[1], bboxes[1][0], expected_input_ids[1]) check(texts[1], bboxes[1][1], expected_input_ids[1]) check(texts[1], bboxes[1][2], expected_input_ids[2]) check(texts[1], bboxes[1][3], expected_input_ids[2]) check(texts[1], bboxes[1][4], expected_input_ids[3]) with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) check(texts[2], bboxes[2][0], expected_input_ids[4]) check(texts[2], bboxes[2][1], expected_input_ids[4]) check(texts[2], bboxes[2][2], expected_input_ids[5]) check(texts[2], bboxes[2][3], expected_input_ids[5]) check(texts[3], bboxes[3][0], expected_input_ids[5]) with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, add_eos_token=True, ) self.assertListEqual( outputs.input_ids, [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], ) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, padding=True, add_eos_token=True, ) self.assertListEqual( outputs.input_ids[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertListEqual( outputs.input_ids.numpy().tolist()[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask.numpy().tolist()[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) num_image_tokens = 64 outputs = processor(images=image, 
text=texts[0], bboxes=None, add_eos_token=True) self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) self.assertListEqual( outputs.input_ids, [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], ) self.assertListEqual( outputs.image_embeds_position_mask, [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), ) np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, atol=1e-9) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) np.testing.assert_allclose( outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 ) np.testing.assert_allclose( outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 ) EXPECTED_IDS_BATCH_RIGHT_PADDING = [ [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH_RIGHT_PADDING = [ [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) self.assertListEqual( outputs.image_embeds_position_mask.numpy().tolist(), [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), ) processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) EXPECTED_IDS_BATCH = [ [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH =[ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] EXPECTED_IMG_POS_MASK_BATCH = [ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), [0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), ] self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) 
self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1])
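

# For reference, a minimal runnable sketch of how `bboxes` become <patch_index_xxxx>
# tokens, mirroring `test_full_processor` above; it assumes the public
# "microsoft/kosmos-2-patch14-224" processor and is guarded against test collection.
if __name__ == "__main__":
    processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224")
    # One grounded phrase with one box, given as a (start_patch_index, end_patch_index) pair.
    outputs = processor(
        images=None,
        text="<grounding> <phrase> Two puppies </phrase> sit in a field of grass.",
        bboxes=[(79, 1016)],
        add_eos_token=True,
    )
    # input_ids wrap the phrase with <object> <patch_index_0079> <patch_index_1016> </object>,
    # i.e. the subsequence ... 64009, 64092, 65029, 64010 ... seen in the expectations above.
    print(outputs.input_ids)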
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers import LayoutLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) class LayoutLMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LayoutLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def 
create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LayoutLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LayoutLMForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LayoutLMForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LayoutLMModel, LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMForQuestionAnswering, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "document-question-answering": LayoutLMForQuestionAnswering, "feature-extraction": LayoutLMModel, "fill-mask": LayoutLMForMaskedLM, "text-classification": LayoutLMForSequenceClassification, "token-classification": LayoutLMForTokenClassification, "zero-shot": LayoutLMForSequenceClassification, } if 
is_torch_available()
        else {}
    )
    fx_compatible = True

    def setUp(self):
        self.model_tester = LayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass


def prepare_layoutlm_batch_inputs():
    input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device)
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device)
    bbox = 
torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) labels = torch.tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]],device=torch_device) return input_ids, attention_mask, bbox, token_type_ids, labels @require_torch class LayoutLMModelIntegrationTest(unittest.TestCase): @slow def test_forward_pass_no_head(self): model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) expected_slice = torch.tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3)) expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device) self.assertTrue(torch.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3)) @slow def test_forward_pass_sequence_classification(self): model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2).to( torch_device ) input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs() outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=torch.tensor([1, 1], device=torch_device), ) loss = outputs.loss expected_shape = torch.Size([]) self.assertEqual(loss.shape, expected_shape) logits = outputs.logits expected_shape = torch.Size((2, 2)) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_token_classification(self): model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13).to( torch_device ) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) logits = outputs.logits expected_shape = torch.Size((2, 25, 13)) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_question_answering(self): model = LayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() 
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) expected_shape = torch.Size((2, 25)) self.assertEqual(outputs.start_logits.shape, expected_shape) self.assertEqual(outputs.end_logits.shape, expected_shape)
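
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above). LayoutLM consumes each
# token's bounding box as (x0, y0, x1, y1) normalized to a 0-1000 scale relative
# to the page size, which is why the hand-written integration inputs above top
# out at 1000. A minimal helper for converting pixel coordinates, assuming the
# page width and height are known; the helper name is ours, not the library's:
def normalize_bbox(bbox, page_width, page_height):
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]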
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class TFLayoutLMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy() for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t bbox = tf.convert_to_tensor(bbox) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = LayoutLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, 
initializer_range=self.initializer_range, ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFLayoutLMModel(config=config) result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFLayoutLMForMaskedLM(config=config) result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLayoutLMForSequenceClassification(config=config) result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLayoutLMForTokenClassification(config=config) result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFLayoutLMForQuestionAnswering(config=config) result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = True onnx_min_opset = 10 def setUp(self): self.model_tester = TFLayoutLMModelTester(self) 
self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFLayoutLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass def prepare_layoutlm_batch_inputs(): input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]]) return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class TFLayoutLMModelIntegrationTest(unittest.TestCase): @slow def test_forward_pass_no_head(self): model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased") input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) expected_slice = 
tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3)) expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552]) self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3)) @slow def test_forward_pass_sequence_classification(self): model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2) input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs() outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]), ) loss = outputs.loss expected_shape = (2,) self.assertEqual(loss.shape, expected_shape) logits = outputs.logits expected_shape = (2, 2) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_token_classification(self): model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13) input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) logits = outputs.logits expected_shape = tf.convert_to_tensor((2, 25, 13)) self.assertEqual(logits.shape, expected_shape) @slow def test_forward_pass_question_answering(self): model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased") input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) expected_shape = tf.convert_to_tensor((2, 25)) self.assertEqual(outputs.start_logits.shape, expected_shape) self.assertEqual(outputs.end_logits.shape, expected_shape)
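
# ---------------------------------------------------------------------------
# Illustrative alternative (a sketch, not used by the tester above). The nested
# loops in prepare_config_and_inputs exist because TF tensors do not support
# item assignment, hence the round-trip through numpy; the same "make bboxes
# legal" fix (x1 >= x0 and y1 >= y0) can be written as one vectorized operation:
import numpy as np

def make_bboxes_legal(bbox: np.ndarray) -> np.ndarray:
    out = bbox.copy()
    # Sorting each (x0, x1) and each (y0, y1) pair ascending enforces the invariant.
    out[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)
    out[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)
    return out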
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMv2ImageProcessor class LayoutLMv2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.apply_ocr = apply_ocr def prepare_image_processor_dict(self): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_pytesseract class LayoutLMv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None def setUp(self): self.image_processor_tester = LayoutLMv2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_layoutlmv2_integration_test(self): image_processing = LayoutLMv2ImageProcessor() from datasets import load_dataset ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test") image = Image.open(ds[0]["file"]).convert("RGB") encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) self.assertEqual(len(encoding.words), len(encoding.boxes)) expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 
'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 
540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] self.assertListEqual(encoding.words, expected_words) self.assertListEqual(encoding.boxes, expected_boxes) image_processing = LayoutLMv2ImageProcessor(apply_ocr=False) encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
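
# ---------------------------------------------------------------------------
# Usage sketch (illustrative): in practice the words/boxes that
# LayoutLMv2ImageProcessor(apply_ocr=True) extracts are paired with the
# tokenizer via LayoutLMv2Processor, which chains both steps in a single call.
# Assumes pytesseract is installed; `encode_document` is our name, not an API:
from PIL import Image

from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor, LayoutLMv2TokenizerFast

def encode_document(image: Image.Image):
    processor = LayoutLMv2Processor(
        image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
        tokenizer=LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased"),
    )
    # Returns input_ids, token_type_ids, attention_mask, bbox and the resized image.
    return processor(image, return_tensors="pt")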
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LayoutLMv2 model."""
import unittest from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device from transformers.utils import is_detectron2_available, is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LayoutLMv2Config, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2Model, ) from transformers.models.layoutlmv2.modeling_layoutlmv2 import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_detectron2_available(): from detectron2.structures.image_list import ImageList class LayoutLMv2ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, image_feature_pool_shape=[7, 7, 256], coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t image = ImageList( torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device), self.image_size, ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = LayoutLMv2Config( 
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, image_feature_pool_shape=self.image_feature_pool_shape, coordinate_size=self.coordinate_size, shape_size=self.shape_size, ) config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18 config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64 config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1 return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image) expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1] self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv2ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "image": image, "token_type_ids": token_type_ids, "attention_mask": 
input_mask, } return config, inputs_dict @require_torch @require_detectron2 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = True test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv2Model, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model} if is_torch_available() else {} ) def setUp(self): self.model_tester = LayoutLMv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @require_torch_multi_gpu @unittest.skip( reason=( "LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well" " with `nn.DataParallel`" ) ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = 
self.model_tester.num_hidden_states_types else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) self.assertListEqual( list(hidden_states[0].shape[-2:]), [expected_seq_len, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip("We cannot configure detectron2 to output a smaller backbone") def test_model_is_small(self): pass @slow def test_model_from_pretrained(self): for model_name in LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LayoutLMv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "backbone" in name or "visual_segment_embedding" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def prepare_layoutlmv2_batch_inputs(): input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) image 
= ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) return input_ids, bbox, image, attention_mask, token_type_ids @require_torch @require_detectron2 class LayoutLMv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device) ( input_ids, bbox, image, attention_mask, token_type_ids, ) = prepare_layoutlmv2_batch_inputs() outputs = model( input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), image=image.to(torch_device), attention_mask=attention_mask.to(torch_device), token_type_ids=token_type_ids.to(torch_device), ) expected_shape = torch.Size( ( 2, input_ids.shape[1] + model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1], model.config.hidden_size, ) ) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3)) expected_shape = torch.Size((2, model.config.hidden_size)) self.assertEqual(outputs.pooler_output.shape, expected_shape)
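# Usage note: the integration test above hand-builds `input_ids`, `bbox` and a
# detectron2 `ImageList`. A minimal end-to-end sketch with the public processor
# API instead (a sketch, assuming `detectron2` and `pytesseract` are installed;
# "document.png" is a hypothetical local file, not a fixture of this suite):
#
#     from PIL import Image
#     from transformers import LayoutLMv2Model, LayoutLMv2Processor
#
#     processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     image = Image.open("document.png").convert("RGB")
#     inputs = processor(image, return_tensors="pt")  # runs OCR, builds input_ids/bbox/image
#     outputs = model(**inputs)
#
# The final sequence length is the number of text tokens plus
# image_feature_pool_shape[0] * image_feature_pool_shape[1] visual tokens
# (7 * 7 = 49 by default), which is exactly what `test_inference_no_head`
# asserts via `expected_shape`.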
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class LayoutLMv3ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.apply_ocr = apply_ocr def prepare_image_processor_dict(self): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_pytesseract class LayoutLMv3ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None def setUp(self): self.image_processor_tester = LayoutLMv3ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_LayoutLMv3_integration_test(self): image_processing = LayoutLMv3ImageProcessor() from datasets import load_dataset ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test") image = Image.open(ds[0]["file"]).convert("RGB") encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) self.assertEqual(len(encoding.words), len(encoding.boxes)) expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 
'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 
540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] self.assertListEqual(encoding.words, expected_words) self.assertListEqual(encoding.boxes, expected_boxes) image_processing = LayoutLMv3ImageProcessor(apply_ocr=False) encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
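# Per the original file's comments, the expected words and boxes above were
# obtained with Tesseract 4.1.1 and checked with apply_ocr=True; the final
# assertion re-runs the processor with apply_ocr=False, which skips OCR and
# returns pixel values only. A minimal usage sketch (a sketch, assuming
# pytesseract is installed; "page.png" is a hypothetical file):
#
#     from PIL import Image
#     from transformers import LayoutLMv3ImageProcessor
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     encoding = processor(Image.open("page.png").convert("RGB"), return_tensors="pt")
#     # encoding.pixel_values: shape (1, 3, 224, 224)
#     # encoding.words / encoding.boxes: OCR tokens and their 0-1000 normalized boxes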
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LayoutLMv3 model. """
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMv3Config, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3Model, ) from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class LayoutLMv3ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox self.text_seq_length = text_seq_length self.image_seq_length = (image_size // patch_size) ** 2 + 1 self.seq_length = self.text_seq_length + self.image_seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = 
ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) config = LayoutLMv3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, pixel_values=pixel_values) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids ) result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) result = model(pixel_values=pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) 
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv3Model, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model} if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = LayoutLMv3ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LayoutLMv3Model.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class LayoutLMv3ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None @slow def test_inference_no_head(self): model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) input_ids = torch.tensor([[1, 2]]) bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) outputs = model( input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), ) expected_shape = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
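# Sequence-length bookkeeping, worked out for the integration test above:
# LayoutLMv3 concatenates the text tokens with (input_size // patch_size) ** 2
# image patch tokens plus one CLS patch token (the tester computes
# image_seq_length the same way). With the default microsoft/layoutlmv3-base
# config (224x224 input, 16x16 patches): (224 // 16) ** 2 + 1 = 197 visual
# tokens, so the 2 text token ids above give the (1, 199, 768)
# last_hidden_state shape asserted in `test_inference_no_head`.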
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow LayoutLMv3 model. """
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMv3Config, TFLayoutLMv3ForQuestionAnswering, TFLayoutLMv3ForSequenceClassification, TFLayoutLMv3ForTokenClassification, TFLayoutLMv3Model, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class TFLayoutLMv3ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox self.text_seq_length = text_seq_length self.image_seq_length = (image_size // patch_size) ** 2 + 1 self.seq_length = self.text_seq_length + self.image_seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) bbox = bbox.numpy() for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: tmp_coordinate = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: tmp_coordinate = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = tmp_coordinate bbox = tf.constant(bbox) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, 
self.text_seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) config = LayoutLMv3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask): model = TFLayoutLMv3Model(config=config) result = model(input_ids, pixel_values=pixel_values, training=False) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, ) result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) result = model(input_ids, training=False) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) result = model({"pixel_values": pixel_values}, training=False) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ): config.num_labels = self.num_labels model = TFLayoutLMv3ForSequenceClassification(config=config) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels ): config.num_labels = self.num_labels model = TFLayoutLMv3ForTokenClassification(config=config) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ): config.num_labels = 2 model = TFLayoutLMv3ForQuestionAnswering(config=config) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, 
self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFLayoutLMv3Model, TFLayoutLMv3ForQuestionAnswering, TFLayoutLMv3ForSequenceClassification, TFLayoutLMv3ForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32 ) return inputs_dict def setUp(self): self.model_tester = TFLayoutLMv3ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) if getattr(model, "hf_compute_loss", None): prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] expected_loss_size = added_label.shape.as_list()[:1] prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) input_ids = prepared_for_class.pop("input_ids") loss = model(input_ids, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) input_ids = prepared_for_class.pop("input_ids") if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: labels[0] = -100 prepared_for_class["labels"] = 
tf.convert_to_tensor(labels) loss = model(input_ids, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) tuple_index_mapping = {0: "input_ids"} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def test_model(self): ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _, ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask) def test_model_various_embeddings(self): ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _, ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config.position_embedding_type = type self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask) def test_for_sequence_classification(self): ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _, ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ) def test_for_token_classification(self): ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels, ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels ) def test_for_question_answering(self): ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _, ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ) @slow def test_model_from_pretrained(self): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFLayoutLMv3Model.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None @slow def test_inference_no_head(self): model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base") image_processor = 
self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="tf").pixel_values input_ids = tf.constant([[1, 2]]) bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0) outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False) expected_shape = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
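# Illustrative sketch (not part of the test suite): the nested-loop bbox fix in prepare_config_and_inputs
# above can equivalently be vectorized with NumPy. For each box [x1, y1, x2, y2], sorting the (x1, x2) and
# (y1, y2) pairs guarantees x2 >= x1 and y2 >= y1, i.e. that every bbox is legal.
import numpy as np

bbox = np.random.randint(0, 1000, size=(2, 7, 4))
bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # legal x coordinates
bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # legal y coordinates
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()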
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Notes collected from the inline comments of the tests below:
# - setUp is taken from test_tokenization_layoutxlm.LayoutXLMTokenizationTest.test_save_pretrained.
# - prepare_image_inputs prepares a list of PIL images, or a list of numpy arrays if one specifies
#   numpify=True, or a list of PyTorch tensors if one specifies torchify=True.
# - test_save_load_pretrained_additional_features covers the slow tokenizer and the fast tokenizer, adding
#   extra args.
# - test_overflowing_tokens checks that, in the case of overflowing tokens, we still have a 1-to-1 mapping
#   between the images and input_ids: sequences that are too long are broken down into multiple sequences.
# - The integration tests set up different use cases and verify our implementation on 2 document images from
#   the DocVQA dataset:
#   - Case 1: document image classification (training, inference) + token classification (inference),
#     apply_ocr=True. Not batched: verify keys, image, input_ids (obtained with Tesseract 4.1.1).
#     Batched: verify keys, images, input_ids (obtained with Tesseract 4.1.1).
#   - Case 2: document image classification (training, inference) + token classification (inference),
#     apply_ocr=False. Not batched: verify keys, input_ids. Batched: verify keys, input_ids, bbox.
#   - Case 3: token classification (training), apply_ocr=False. Not batched: verify keys, input_ids, labels.
#     Batched: verify keys, input_ids, bbox, labels.
#   - Case 4: visual question answering (inference), apply_ocr=True. Not batched: verify keys, input_ids
#     (obtained with Tesseract 4.1.1). Batched: verify keys, input_ids (obtained with Tesseract 4.1.1), bbox.
#   - Case 5: visual question answering (inference), apply_ocr=False. Not batched: verify keys, input_ids.
#     Batched: verify keys, input_ids, bbox.
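# Illustrative sketch (not part of the test suite) of the two processor modes exercised by the cases above;
# it assumes the "microsoft/layoutxlm-base" checkpoint is available and, for the OCR branch, that pytesseract
# is installed. "document.png" is a hypothetical input file.
from PIL import Image

from transformers import LayoutXLMProcessor

image = Image.open("document.png").convert("RGB")

# apply_ocr=True (cases 1 and 4): the image processor runs Tesseract, so only the image
# (plus, optionally, a question) is passed.
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
encoding = processor(image, return_tensors="pt")

# apply_ocr=False (cases 2, 3 and 5): words and boxes must be supplied by the caller.
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base", apply_ocr=False)
encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")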
import json import os import shutil import tempfile import unittest from typing import List import numpy as np from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast from transformers.models.layoutxlm import LayoutXLMTokenizer, LayoutXLMTokenizerFast from transformers.testing_utils import ( require_pytesseract, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor @require_pytesseract @require_sentencepiece @require_tokenizers class LayoutXLMProcessorTest(unittest.TestCase): tokenizer_class = LayoutXLMTokenizer rust_tokenizer_class = LayoutXLMTokenizerFast def setUp(self): image_processor_map = { "do_resize": True, "size": 224, "apply_ocr": True, } self.tmpdirname = tempfile.mkdtemp() self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(image_processor_map) + "\n") self.tokenizer_pretrained_name = "hf-internal-testing/tiny-random-layoutxlm" def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer: return self.tokenizer_class.from_pretrained(self.tokenizer_pretrained_name, **kwargs) def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tokenizer_pretrained_name, **kwargs) def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]: return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)] def get_image_processor(self, **kwargs): return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): image_processor = self.get_image_processor() tokenizers = self.get_tokenizers() for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname) processor = LayoutXLMProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, (LayoutXLMTokenizer, LayoutXLMTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) def test_save_load_pretrained_additional_features(self): processor = LayoutXLMProcessor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30) processor = LayoutXLMProcessor.from_pretrained( self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30, ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, LayoutXLMTokenizer) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) 
self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30) processor = LayoutXLMProcessor.from_pretrained( self.tmpdirname, use_fast=True, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, LayoutXLMTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = LayoutXLMProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False) self.assertListEqual(list(inputs.keys()), processor.model_input_names) @slow def test_overflowing_tokens(self): from datasets import load_dataset datasets = load_dataset("nielsr/funsd") processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base", apply_ocr=False) def preprocess_data(examples): images = [Image.open(path).convert("RGB") for path in examples["image_path"]] words = examples["words"] boxes = examples["bboxes"] word_labels = examples["ner_tags"] encoded_inputs = processor( images, words, boxes=boxes, word_labels=word_labels, max_length=512, padding="max_length", truncation=True, return_overflowing_tokens=True, stride=50, return_offsets_mapping=True, return_tensors="pt", ) return encoded_inputs train_data = preprocess_data(datasets["train"]) self.assertEqual(len(train_data["image"]), len(train_data["input_ids"])) @require_sentencepiece @require_torch @require_pytesseract class LayoutXLMProcessorIntegrationTests(unittest.TestCase): @cached_property def get_images(self): from datasets import load_dataset ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test") image_1 = Image.open(ds[0]["file"]).convert("RGB") image_2 = Image.open(ds[1]["file"]).convert("RGB") return image_1, image_2 @cached_property def get_tokenizers(self): slow_tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") fast_tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base") return [slow_tokenizer, fast_tokenizer] @slow def test_processor_case_1(self): image_processor = LayoutLMv2ImageProcessor() tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) input_feat_extract = image_processor(images[0], return_tensors="pt") input_processor = processor(images[0], return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) self.assertAlmostEqual( input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2 ) expected_decoding = "<s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. 
(Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) input_feat_extract = image_processor(images, return_tensors="pt") input_processor = processor(images, padding=True, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) self.assertAlmostEqual( input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2 ) expected_decoding = "<s> 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? 
aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) @slow def test_processor_case_2(self): image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt") expected_keys = ["input_ids", "bbox", "attention_mask", "image"] actual_keys = list(input_processor.keys()) for key in expected_keys: self.assertIn(key, actual_keys) expected_decoding = "<s> hello world</s>" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> hello world</s><pad><pad>" decoding = processor.decode(input_processor.input_ids[0].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_bbox = [ [0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000], ] self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow def test_processor_case_3(self): image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) words = ["weirdly", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] word_labels = [1, 2] input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> weirdly world</s>" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_labels = [-100, 1, -100, 2, -100] self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels) words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] word_labels = [[1, 2], [6, 3, 10, 2]] input_processor = processor( images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt" ) expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> my name is niels</s>" 
decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_bbox = [ [0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000], ] self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) expected_labels = [-100, 6, 3, 10, 2, -100, -100] self.assertListEqual(input_processor.labels[1].tolist(), expected_labels) @slow def test_processor_case_4(self): image_processor = LayoutLMv2ImageProcessor() tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) question = "What's his name?" input_processor = processor(images[0], question, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> What's his name?</s></s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) questions = ["How old is he?", "what's the time"] input_processor = processor( images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt" ) expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> what's the time</s></s> 7 ITC Limited REPORT AND ACCOUNTS 2013</s>" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [1000, 1000, 1000, 1000]] self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow def test_processor_case_5(self): image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer) question = "What's his name?" 
words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] input_processor = processor(images[0], question, words, boxes, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> What's his name?</s></s> hello world</s>" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) questions = ["How old is he?", "what's the time"] words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt") expected_keys = ["attention_mask", "bbox", "image", "input_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) expected_decoding = "<s> How old is he?</s></s> hello world</s><pad><pad>" decoding = processor.decode(input_processor.input_ids[0].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_decoding = "<s> what's the time</s></s> my name is niels</s>" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]] self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Notes:
# - We have a SentencePiece fixture for testing.
# - The tests below override the corresponding tests in test_tokenization_common.py because of the required
#   input format of the __call__ method of this tokenizer.
# - test_save_and_load_tokenizer verifies that we will be able to save the tokenizer even if the original files
#   that were used to build the tokenizer have been deleted in the meantime.
# - We usually have added tokens from the start in tests because our vocab fixtures are smaller than the
#   original vocabs, so let's not assert this:
#   # self.assertEqual(vocab_size, all_size)

    def test_padding_to_max_length(self):
        """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` is deprecated."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                padding_idx = tokenizer.pad_token_id

                # Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                # FIXME: the next line should be padding="max_length" to avoid warning
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

                # Check that nothing is done when a maximum length is not specified
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

    def test_padding(self, max_length=50):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
                pad_token_id = tokenizer_p.pad_token_id

                # Encode - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
                input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode_plus - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Encode_plus - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Batch_encode_plus - Simple input
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding="longest")
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding=True)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Batch_encode_plus - Pair input
                questions, words, boxes = self.get_question_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(list(zip(questions, words)), is_pair=True, boxes=boxes, padding=True)
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)), is_pair=True, boxes=boxes, padding="longest"
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad on single examples after tokenization
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r)
                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )

                # Using pad on single examples after tokenization
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r)
                input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

    def test_padding_warning_message_fast_tokenizer(self):
        if not self.test_rust_tokenizer:
            return

        words, boxes = self.get_words_and_boxes_batch()

        tokenizer_fast = self.get_rust_tokenizer()

        encoding_fast = tokenizer_fast(words, boxes=boxes)

        with self.assertLogs("transformers", level="WARNING") as cm:
            tokenizer_fast.pad(encoding_fast)
        self.assertEqual(len(cm.records), 1)
        self.assertIn(
            "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to"
            " encode the text followed by a call to the `pad` method to get a padded encoding.",
            cm.records[0].message,
        )

        if not self.test_slow_tokenizer:
            return

        tokenizer_slow = self.get_tokenizer()

        encoding_slow = tokenizer_slow(words, boxes=boxes)

        with self.assertLogs(level="WARNING") as cm:
            # We want to assert there are no warnings, but the 'assertLogs' method does not support that.
            # Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
            logger.warning("Dummy warning")
            tokenizer_slow.pad(encoding_slow)
        self.assertEqual(len(cm.records), 1)
        self.assertIn("Dummy warning", cm.records[0].message)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Test not batched
                words, boxes = self.get_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

                # Test not batched pairs
                question, words, boxes = self.get_question_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

                # Test batched
                words, boxes = self.get_words_and_boxes_batch()
                encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

    def test_batch_encode_plus_batch_sequence_length(self):
        # Tests that all encoded values have the correct size
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                encoded_sequences = [
                    tokenizer.encode_plus(words_example, boxes=boxes_example)
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

                maximum_length = len(
                    max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
                )

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences_padded = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]

                encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                self.assertListEqual(
                    encoded_sequences_padded,
                    self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
                )

                # check 'longest' is unsensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
                )
                for key in encoded_sequences_batch_padded_1.keys():
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )

                # check 'no_padding' is unsensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=False
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
                )
                for key in encoded_sequences_batch_padded_1.keys():
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )

    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
    def test_batch_encode_plus_overflowing_tokens(self):
        pass

    def test_batch_encode_plus_padding(self):
        # Test that padded sequences are equivalent between batch_encode_plus and encode_plus

        # Right padding tests
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                max_length = 100

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

        # Left padding tests
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokenizer.padding_side = "left"
                words, boxes = self.get_words_and_boxes_batch()

                max_length = 100

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

    def test_padding_to_multiple_of(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if tokenizer.pad_token is None:
                    self.skipTest("No padding token.")
                else:
                    words, boxes = self.get_words_and_boxes()

                    empty_tokens = tokenizer([], [], padding=True, pad_to_multiple_of=8)
                    normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
                    for key, value in empty_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
                    for key, value in normal_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
                    for key, value in normal_tokens.items():
                        self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    # Should also work with truncation
                    normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
                    for key, value in normal_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    # truncation to something which is not a multiple of pad_to_multiple_of raises an error
                    self.assertRaises(
                        ValueError,
                        tokenizer.__call__,
                        words,
                        boxes=boxes,
                        padding=True,
                        truncation=True,
                        max_length=12,
                        pad_to_multiple_of=8,
                    )

    def test_tokenizer_slow_store_full_signature(self):
        signature = inspect.signature(self.tokenizer_class.__init__)
        tokenizer = self.get_tokenizer()

        for parameter_name, parameter in signature.parameters.items():
            if parameter.default != inspect.Parameter.empty:
                self.assertIn(parameter_name, tokenizer.init_kwargs)

    def test_build_inputs_with_special_tokens(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Input tokens id
                words, boxes = self.get_words_and_boxes()
                input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
                input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)

                # Generate output
                output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
                output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
                self.assertEqual(output_p, output_r)

                # Generate pair output
                output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
                output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
                self.assertEqual(output_p, output_r)

    def test_special_tokens_mask_input_pairs(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                    # add_prefix_space=False,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)

    def test_special_tokens_mask(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                # Testing single inputs
                encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]
                ]
                self.assertEqual(encoded_sequence, filtered_sequence)

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                words, boxes = self.get_words_and_boxes()
                tmpdirname = tempfile.mkdtemp()

                before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)

                shutil.rmtree(tmpdirname)

    @unittest.skip("Not implemented")
    def test_right_and_left_truncation(self):
        pass

    def test_right_and_left_padding(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                sequence = "Sequence"
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, sequence)

                padding_idx = tokenizer.pad_token_id

                # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

                # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "left"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert [padding_idx] * padding_size + encoded_sequence == padded_sequence

                # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
                padded_sequence_left_length = len(padded_sequence_left)
                assert sequence_length == padded_sequence_left_length
                assert encoded_sequence == padded_sequence_left

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
                padded_sequence_left_length = len(padded_sequence_left)
                assert sequence_length == padded_sequence_left_length
                assert encoded_sequence == padded_sequence_left

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # test 1: single sequence
                words, boxes = self.get_words_and_boxes()

                output = tokenizer(words, boxes=boxes, return_token_type_ids=True)

                # Assert that the token type IDs have the same length as the input IDs
                self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))

                # Assert that the token type IDs have the same length as the attention mask
                self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))

                self.assertIn(0, output["token_type_ids"])
                self.assertNotIn(1, output["token_type_ids"])

                # test 2: two sequences (question + words)
                question, words, boxes = self.get_question_words_and_boxes()

                output = tokenizer(question, words, boxes, return_token_type_ids=True)

                # Assert that the token type IDs have the same length as the input IDs
                self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))

                # Assert that the token type IDs have the same length as the attention mask
                self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))

                self.assertIn(0, output["token_type_ids"])
                self.assertNotIn(1, output["token_type_ids"])

    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(text))]

                # No pair
                tokens_with_offsets = tokenizer_r.encode_plus(
                    text,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(False)
                offsets = tokens_with_offsets["offset_mapping"]

                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

                # Assert there are `added_tokens` special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

                # Pairs
                text = "what's his name"
                pair = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
                tokens_with_offsets = tokenizer_r.encode_plus(
                    text,
                    pair,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(True)
                offsets = tokens_with_offsets["offset_mapping"]

                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

                # Assert there are `added_tokens` special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import MODEL_MAPPING, TOKENIZER_MAPPING

        MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)

        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
                    return

                config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                config = config_class()

                if config.is_encoder_decoder or config.pad_token_id is None:
                    return

                model = model_class(config)

                # Make sure the model contains at least the full vocabulary size in its embedding matrix
                is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
                assert (
                    (model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
                    if is_using_common_embeddings
                    else True
                )

                # Build sequence
                words, boxes = self.get_words_and_boxes()
                encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
                batch_encoded_sequence = tokenizer.batch_encode_plus(
                    [words, words], boxes=[boxes, boxes], return_tensors="pt"
                )

                # This should not fail
                with torch.no_grad():  # saves some time
                    model(**encoded_sequence)
                    model(**batch_encoded_sequence)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        words, boxes = self.get_words_and_boxes()

        ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
        rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
        self.assertListEqual(ids, rust_ids)

    def test_tokenization_python_rust_equals(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                words, boxes = self.get_words_and_boxes()

                # Ensure basic input match
                input_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key])

                input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])

                words = ["hello" for _ in range(1000)]
                boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]

                # Ensure truncation match
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key])

                # Ensure truncation with stride match
                input_p = tokenizer_p.encode_plus(
                    words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
                )
                input_r = tokenizer_r.encode_plus(
                    words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
                )

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key][0])

    def test_embeded_special_tokens(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                words, boxes = self.get_words_and_boxes()
                tokens_r = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
                tokens_p = tokenizer_p.encode_plus(words, boxes=boxes, add_special_tokens=True)

                for key in tokens_p.keys():
                    self.assertEqual(tokens_r[key], tokens_p[key])

                if "token_type_ids" in tokens_r:
                    self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_r, tokens_p)

    def test_compare_add_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)

                words, boxes = self.get_words_and_boxes()
                # tokenize()
                no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
                with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

                # encode()
                no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

                # encode_plus()
                no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
                for key in no_special_tokens.keys():
                    self.assertEqual(
                        len(no_special_tokens[key]),
                        len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
                    )

                # batch_encode_plus()
                words, boxes = self.get_words_and_boxes_batch()
                no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
                for key in no_special_tokens.keys():
                    for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                        self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)

    @slow
    def test_layoutxlm_truncation_integration_test(self):
        words, boxes = self.get_words_and_boxes()

        tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", model_max_length=512)

        for i in range(12, 512):
            new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)

            # Ensure that the input IDs are less than the max length defined.
            self.assertLessEqual(len(new_encoded_inputs), i)

        tokenizer.model_max_length = 20
        new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
        dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)

        # Ensure that the input IDs are still truncated when no max_length is specified
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                # A Tensor cannot be build by sequences which are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        words, boxes=boxes, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            if not tokenizer.is_fast:
                continue
            with self.
subtestftokenizer class name seq0 test this method seq1 with these inputs boxes 1000 1000 1000 1000 for in rangelenseq1 we want to have sequence 0 and sequence 1 are tagged respectively with 0 and 1 tokenids regardless of whether the model use token type ids we use this assumption in the qa pipeline among other place output tokenizerseq0 split boxesboxes self assertin0 output sequenceids output tokenizerseq0 seq1 boxesboxes self assertin0 output sequenceids self assertin1 output sequenceids if tokenizer numspecialtokenstoaddpairtrue self assertinnone output sequenceids def testspecialtokensinitializationself for tokenizer pretrainedname kwargs in self tokenizerslist with self subtestftokenizer class name pretrainedname addedtokens addedtokenspecial lstriptrue tokenizerr self rusttokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs words hey this is a special token split boxes 1000 1000 1000 1000 for in rangelenwords routput tokenizerr encodewords boxesboxes specialtokenid tokenizerr encode special boxes1000 1000 1000 1000 addspecialtokensfalse 0 self asserttruespecialtokenid in routput if self testslowtokenizer tokenizercr self rusttokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs fromslowtrue tokenizerp self tokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs words hey this is a special token split boxes 1000 1000 1000 1000 for in rangelenwords poutput tokenizerp encodewords boxesboxes croutput tokenizercr encodewords boxesboxes self assertequalpoutput routput self assertequalcroutput routput self asserttruespecialtokenid in poutput self asserttruespecialtokenid in croutput def testtrainingnewtokenizerself this feature only exists for fast tokenizers if not self testrusttokenizer return tokenizer self getrusttokenizer newtokenizer tokenizer trainnewfromiteratorsmalltrainingcorpus 100 test we can use the new tokenizer with something not seen during training text this is the how are you boxes 1 2 3 4 5 6 7 8 1 3 4 8 5 6 7 8 4 5 6 7 3 9 2 7 inputs newtokenizertext boxesboxes self assertequalleninputsinputids 2 decodedinput newtokenizer decodeinputsinputids0 skipspecialtokenstrue expectedresult this is the if tokenizer backendtokenizer normalizer is not none expectedresult tokenizer backendtokenizer normalizer normalizestrexpectedresult self assertequalexpectedresult decodedinput we check that the parameters of the tokenizer remained the same check we have the same number of addedtokens for both pair and nonpair inputs self assertequaltokenizer numspecialtokenstoaddfalse newtokenizer numspecialtokenstoaddfalse self assertequaltokenizer numspecialtokenstoaddtrue newtokenizer numspecialtokenstoaddtrue check we have the correct maxlength for both pair and nonpair inputs self assertequaltokenizer maxlensinglesentence newtokenizer maxlensinglesentence self assertequaltokenizer maxlensentencespair newtokenizer maxlensentencespair assert the set of special tokens match as we didn t ask to change them self assertsequenceequal tokenizer allspecialtokensextended newtokenizer allspecialtokensextended self assertdictequaltokenizer specialtokensmap newtokenizer specialtokensmap def testtrainingnewtokenizerwithspecialtokenschangeself this feature only exists for fast tokenizers if not self testrusttokenizer return tokenizer self getrusttokenizer test with a special tokens map classsignature inspect signaturetokenizer class if clstoken in classsignature parameters newtokenizer tokenizer 
trainnewfromiterator smalltrainingcorpus 100 specialtokensmaptokenizer clstoken cls clsid newtokenizer getvocabcls self assertequalnewtokenizer clstoken cls self assertequalnewtokenizer clstokenid clsid create a new mapping from the special tokens defined in the original tokenizer specialtokenslist specialtokensmixin specialtokensattributes copy specialtokenslist removeadditionalspecialtokens specialtokensmap for token in specialtokenslist get the private one to avoid unnecessary warnings if getattrtokenizer ftoken is not none specialtoken getattrtokenizer token specialtokensmapspecialtoken fspecialtokena train new tokenizer newtokenizer tokenizer trainnewfromiterator smalltrainingcorpus 100 specialtokensmapspecialtokensmap check the changes for token in specialtokenslist get the private one to avoid unnecessary warnings if getattrtokenizer ftoken is none continue specialtoken getattrtokenizer token if specialtoken in specialtokensmap newspecialtoken getattrnewtokenizer token self assertequalspecialtokensmapspecialtoken newspecialtoken newid newtokenizer getvocabnewspecialtoken self assertequalgetattrnewtokenizer ftokenid newid check if the addedtoken string format has been kept for specialtoken in tokenizer allspecialtokensextended if isinstancespecialtoken addedtoken and specialtoken content not in specialtokensmap the special token must appear identically in the list of the new tokenizer self asserttrue specialtoken in newtokenizer allspecialtokensextended f specialtoken should be in newtokenizer allspecialtokensextended elif isinstancespecialtoken addedtoken the special token must appear in the list of the new tokenizer as an object of type addedtoken with the same parameters as the old addedtoken except the content that the user has requested to change specialtokenstr specialtoken content newspecialtokenstr specialtokensmapspecialtokenstr find false for candidate in newtokenizer allspecialtokensextended if isinstancecandidate addedtoken and candidate content newspecialtokenstr and candidate lstrip specialtoken lstrip and candidate rstrip specialtoken rstrip and candidate normalized specialtoken normalized and candidate singleword specialtoken singleword find true break self asserttrue find f newspecialtokenstr doesn t appear in the list f newtokenizer allspecialtokensextended as an addedtoken with the same parameters as f specialtoken in the list tokenizer allspecialtokensextended elif specialtoken not in specialtokensmap the special token must appear identically in the list of the new tokenizer self asserttrue specialtoken in newtokenizer allspecialtokensextended f specialtoken should be in newtokenizer allspecialtokensextended else the special token must appear in the list of the new tokenizer as an object of type string self asserttruespecialtokensmapspecialtoken in newtokenizer allspecialtokensextended test we can use the new tokenizer with something not seen during training words this is hello boxes 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 inputs newtokenizerwords boxesboxes self assertequalleninputsinputids 2 decodedinput newtokenizer decodeinputsinputids0 skipspecialtokenstrue expectedresult this is if tokenizer backendtokenizer normalizer is not none expectedresult tokenizer backendtokenizer normalizer normalizestrexpectedresult self assertequalexpectedresult decodedinput def testprepareformodelself tokenizers self gettokenizersdolowercasefalse for tokenizer in tokenizers only test prepareformodel for the slow tokenizer if tokenizer class name layoutxlmtokenizerfast continue with self 
subtestftokenizer class name words boxes self getwordsandboxes preparedinputdict tokenizer prepareformodelwords boxesboxes addspecialtokenstrue inputdict tokenizer encodepluswords boxesboxes addspecialtokenstrue self assertequalinputdict preparedinputdict def testpaddingdifferentmodelinputnameself if not self testslowtokenizer as we don t have a slow version we can t compare the outputs between slow and fast versions return for tokenizer pretrainedname kwargs in self tokenizerslist with self subtestftokenizer class name pretrainedname tokenizerr self rusttokenizerclass frompretrainedpretrainedname kwargs tokenizerp self tokenizerclass frompretrainedpretrainedname kwargs self assertequaltokenizerp padtokenid tokenizerr padtokenid padtokenid tokenizerp padtokenid words boxes self getwordsandboxesbatch inputr tokenizerr batchencodepluswords boxesboxes inputp tokenizerr batchencodepluswords boxesboxes rename encoded batch to inputs inputrinputs inputrtokenizerr modelinputnames0 del inputrtokenizerr modelinputnames0 inputpinputs inputptokenizerp modelinputnames0 del inputptokenizerp modelinputnames0 renaming inputids to inputs tokenizerr modelinputnames inputs tokenizerr modelinputnames1 tokenizerp modelinputnames inputs tokenizerp modelinputnames1 inputr tokenizerr padinputr paddinglongest inputp tokenizerr padinputp paddinglongest maxlength leninputpinputs0 self assertbatchpaddedinputmatch inputr inputp maxlength padtokenid modelmaininputnameinputs def testbatchencodedynamicoverflowingself for tokenizer pretrainedname kwargs in self tokenizerslist tokenizer self rusttokenizerclass frompretrainedpretrainedname kwargs with self subtestftokenizer class name pretrainedname tokenizer class name if istorchavailable returnedtensor pt elif istfavailable returnedtensor tf else returnedtensor jax single example words boxes self getwordsandboxes tokens tokenizer encodeplus words boxesboxes maxlength6 paddingtrue truncationtrue returntensorsreturnedtensor returnoverflowingtokenstrue for key in filterlambda x overflowtosamplemapping not in x tokens keys if key bbox self assertequallentokenskey shape 2 else self assertequallentokenskey shape 3 batch of examples for these 2 examples 3 training examples will be created words boxes self getwordsandboxesbatch tokens tokenizer batchencodeplus words boxesboxes maxlength6 paddingtrue truncationonlyfirst returntensorsreturnedtensor returnoverflowingtokenstrue for key in filterlambda x overflowtosamplemapping not in x tokens keys if key bbox self assertequallentokenskey shape 2 self assertequaltokenskey shape1 6 else self assertequallentokenskey shape 3 self assertequaltokenskey shape1 4 overwrite from testtokenizationcommon to speed up test def testsavepretrainedself if not self testslowtokenizer as we don t have a slow version we can t compare the outputs between slow and fast versions return self tokenizerslist0 self rusttokenizerclass hfinternaltestingtinyrandomlayoutxlm for tokenizer pretrainedname kwargs in self tokenizerslist with self subtestftokenizer class name pretrainedname tokenizerr self rusttokenizerclass frompretrainedpretrainedname kwargs tokenizerp self tokenizerclass frompretrainedpretrainedname kwargs tmpdirname2 tempfile mkdtemp tokenizerrfiles tokenizerr savepretrainedtmpdirname2 tokenizerpfiles tokenizerp savepretrainedtmpdirname2 checks it save with the same files the tokenizer json file for the fast one self asserttrueanytokenizer json in f for f in tokenizerrfiles tokenizerrfiles tuplef for f in tokenizerrfiles if tokenizer json not in f 
self assertsequenceequaltokenizerrfiles tokenizerpfiles checks everything loads correctly in the same way tokenizerrp tokenizerr frompretrainedtmpdirname2 tokenizerpp tokenizerp frompretrainedtmpdirname2 check special tokens are set accordingly on rust and python for key in tokenizerpp specialtokensmap self asserttruehasattrtokenizerrp key self assertequalgetattrtokenizerrp key getattrtokenizerpp key self assertequalgetattrtokenizerrp key id getattrtokenizerpp key id shutil rmtreetmpdirname2 save tokenizer rust legacyformattrue tmpdirname2 tempfile mkdtemp tokenizerrfiles tokenizerr savepretrainedtmpdirname2 legacyformattrue tokenizerpfiles tokenizerp savepretrainedtmpdirname2 checks it save with the same files self assertsequenceequaltokenizerrfiles tokenizerpfiles checks everything loads correctly in the same way tokenizerrp tokenizerr frompretrainedtmpdirname2 tokenizerpp tokenizerp frompretrainedtmpdirname2 check special tokens are set accordingly on rust and python for key in tokenizerpp specialtokensmap self asserttruehasattrtokenizerrp key shutil rmtreetmpdirname2 save tokenizer rust legacyformatfalse tmpdirname2 tempfile mkdtemp tokenizerrfiles tokenizerr savepretrainedtmpdirname2 legacyformatfalse tokenizerpfiles tokenizerp savepretrainedtmpdirname2 checks it saved the tokenizer json file self asserttrueanytokenizer json in f for f in tokenizerrfiles checks everything loads correctly in the same way tokenizerrp tokenizerr frompretrainedtmpdirname2 tokenizerpp tokenizerp frompretrainedtmpdirname2 check special tokens are set accordingly on rust and python for key in tokenizerpp specialtokensmap self asserttruehasattrtokenizerrp key shutil rmtreetmpdirname2 unittest skipto do overwrite this very extensive test def testalignementmethodsself pass unittest skiplayoutxlm tokenizer requires boxes besides sequences def testmaximumencodinglengthpairinputself pass unittest skiplayoutxlm tokenizer requires boxes besides sequences def testmaximumencodinglengthsingleinputself pass unittest skiplayoutxlm tokenizer requires boxes besides sequences def testpretokenizedinputsself pass unittest skiplayoutxlm tokenizer always expects pretokenized inputs def testcomparepretokenizedinputsself pass unittest skiplayoutxlm fast tokenizer does not support prepareformodel def testcompareprepareformodelself pass slow def testonlylabelfirstsubwordself words hello niels boxes 1000 1000 1000 1000 for in rangelenwords wordlabels 0 1 test slow tokenizer tokenizerp layoutxlmtokenizer frompretrainedmicrosoftlayoutxlmbase encoding tokenizerpwords boxesboxes wordlabelswordlabels self assertlistequalencoding labels 100 0 100 1 100 100 tokenizerp layoutxlmtokenizer frompretrainedmicrosoftlayoutxlmbase onlylabelfirstsubwordfalse encoding tokenizerpwords boxesboxes wordlabelswordlabels self assertlistequalencoding labels 100 0 0 1 1 100 test fast tokenizer tokenizerr layoutxlmtokenizerfast frompretrainedmicrosoftlayoutxlmbase encoding tokenizerrwords boxesboxes wordlabelswordlabels self assertlistequalencoding labels 100 0 100 1 100 100 tokenizerr layoutxlmtokenizer frompretrainedmicrosoftlayoutxlmbase onlylabelfirstsubwordfalse encoding tokenizerrwords boxesboxes wordlabelswordlabels self assertlistequalencoding labels 100 0 0 1 1 100 slow def testlayoutxlmintegrationtestself tokenizerp layoutxlmtokenizer frompretrainedmicrosoftlayoutxlmbase tokenizerr layoutxlmtokenizerfast frompretrainedmicrosoftlayoutxlmbase there are 3 cases case 1 document image classification training inference document image token classification 
inference in which case only words and normalized bounding boxes are provided to the tokenizer case 2 document image token classification training in which case one also provides word labels to the tokenizer case 3 document image visual question answering inference in which case one also provides a question to the tokenizer we need to test all 3 cases both on batched and nonbatched inputs case 1 not batched words boxes self getwordsandboxes expectedresults inputids 0 10 179459 538 3034 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 bbox 0 0 0 0 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 attentionmask 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpwords boxesboxes paddingmaxlength maxlength20 encodingr tokenizerrwords boxesboxes paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults case 1 batched words boxes self getwordsandboxesbatch expectedresults inputids 0 10 179459 538 3034 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 33600 31 759 9351 83 21895 2 1 1 1 1 1 1 1 1 1 1 1 1 bbox 0 0 0 0 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 961 885 992 912 961 885 992 912 256 38 330 58 256 38 330 58 336 42 353 57 34 42 66 69 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 attentionmask 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpwords boxesboxes paddingmaxlength maxlength20 encodingr tokenizerrwords boxesboxes paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults case 2 not batched words boxes self getwordsandboxes wordlabels 1 2 3 expectedresults inputids 0 10 179459 538 3034 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 bbox 0 0 0 0 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 labels 100 1 2 100 3 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 attentionmask 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpwords boxesboxes wordlabelswordlabels paddingmaxlength maxlength20 encodingr tokenizerrwords boxesboxes wordlabelswordlabels paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults case 2 batched words boxes self getwordsandboxesbatch wordlabels 1 2 3 2 46 17 22 3 expectedresults inputids 0 10 179459 538 3034 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 33600 31 759 9351 83 21895 2 1 1 1 1 1 1 1 1 1 1 1 1 bbox 0 0 0 0 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 961 885 992 912 961 885 992 912 256 38 330 58 256 38 330 58 336 42 353 57 34 42 66 69 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 labels 100 1 2 100 3 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 2 100 46 17 22 3 100 100 100 100 100 100 100 100 100 100 100 100 100 attentionmask 1 1 1 1 1 1 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpwords boxesboxes wordlabelswordlabels paddingmaxlength maxlength20 encodingr tokenizerrwords boxesboxes wordlabelswordlabels paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults case 3 not batched question words boxes self getquestionwordsandboxes expectedresults inputids 0 2367 25 7 1919 9351 32 2 2 10 179459 538 3034 2 1 1 1 1 1 1 attentionmask 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 bbox 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1000 1000 1000 1000 1000 1000 1000 1000 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpquestion words boxes paddingmaxlength maxlength20 encodingr tokenizerrquestion words boxes paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults case 3 batched questions words boxes self getquestionwordsandboxesbatch expectedresults inputids 0 2367 25 7 1919 9351 32 2 2 10 179459 538 3034 2 1 1 1 1 1 1 0 3642 83 764 35839 32 2 2 2367 10 21 3190 53496 19 2 1 1 1 1 1 attentionmask 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 bbox 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1000 1000 1000 1000 1000 1000 1000 1000 423 237 440 251 427 272 441 287 427 272 441 287 419 115 437 129 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1000 1000 1000 1000 1000 1000 1000 1000 256 38 330 58 256 38 330 58 336 42 353 57 336 42 353 57 34 42 66 69 34 42 66 69 1000 1000 1000 1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 fmt skip encodingp tokenizerpquestions words boxes paddingmaxlength maxlength20 encodingr tokenizerrquestions words boxes paddingmaxlength maxlength20 self assertdictequaldictencodingp expectedresults self assertdictequaldictencodingr expectedresults unittest skipdoesn t support another framework than pytorch def testnpencodeplussenttomodelself pass unittest skipdoesn t use sentencepiece def testsentencepiecetokenizeandconverttokenstostringself pass unittest skipdoesn t use sentencepiece def testsentencepiecetokenizeanddecodeself pass unittest skipchat is not supported def testchattemplateself pass coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license we have a sentencepiece fixture for testing override test in test_tokenization_common py because of the required input format of the __call__ method of this tokenizer we want to verify that we will be able to save the tokenizer even if the original files that were used to build the tokenizer have been deleted in the meantime we usually have added tokens from the start in tests because our vocab fixtures are smaller than the original vocabs let s not assert this self assertequal vocab_size all_size check correct behaviour if no pad_token_id exists and add it eventually test 
longest and no_padding don t do anything test right padding test left padding test 1 single sequence method is implemented e g not gpt 2 test 2 two sequences method is implemented e g not gpt 2 we keep this test for backward compatibility but it should be removed when pad_to_max_length will be deprecated check correct behaviour if no pad_token_id exists and add it eventually check that it correctly pads when a maximum length is specified along with the padding flag set to true fixme the next line should be padding max_length to avoid warning check that nothing is done when a maximum length is not specified encode simple input encode pair input encode_plus simple input encode_plus pair input batch_encode_plus simple input batch_encode_plus pair input using pad on single examples after tokenization using pad on single examples after tokenization using pad after tokenization using pad after tokenization we want to assert there are no warnings but the assertlogs method does not support that therefore we are adding a dummy warning and then we will assert it is the only warning tests that all call wrap to encode_plus and batch_encode_plus test not batched test not batched pairs test batched tests that all encoded values have the correct size check correct behaviour if no pad_token_id exists and add it eventually check longest is unsensitive to a max length check no_padding is unsensitive to a max length test that padded sequences are equivalent between batch_encode_plus and encode_plus right padding tests check correct behaviour if no pad_token_id exists and add it eventually left padding tests check correct behaviour if no pad_token_id exists and add it eventually empty_tokens tokenizer padding true pad_to_multiple_of 8 for key value in empty_tokens items self assertequal len value 8 0 f batchencoding key is not multiple of 8 should also work with truncation truncation to something which is not a multiple of pad_to_multiple_of raises an error as we don t have a slow version we can t compare the outputs between slow and fast versions input tokens id generate output generate pair output add_prefix_space false testing single inputs safety check on max_len default value so we are sure the test works now let s start the test isolate this from the other tests because we save additional tokens etc check correct behaviour if no pad_token_id exists and add it eventually right padding check that it correctly pads when a maximum length is specified along with the padding flag set to true left padding check that it correctly pads when a maximum length is specified along with the padding flag set to true right left padding check that nothing is done for longest and no_padding test 1 single sequence assert that the token type ids have the same length as the input ids assert that the token type ids have the same length as the attention mask test 2 two sequences question words assert that the token type ids have the same length as the input ids assert that the token type ids have the same length as the attention mask no pair assert there is the same number of tokens and offsets assert there is online added_tokens special_tokens pairs assert there is the same number of tokens and offsets assert there is online added_tokens special_tokens make sure the model contains at least the full vocabulary size in its embedding matrix build sequence this should not fail saves some time as we don t have a slow version we can t compare the outputs between slow and fast versions as we don t have a slow version we can t 
compare the outputs between slow and fast versions ensure basic input match ensure truncation match ensure truncation with stride match as we don t have a slow version we can t compare the outputs between slow and fast versions tokenize encode encode_plus batch_encode_plus ensure that the input ids are less than the max length defined ensure that the input ids are still truncated when no max_length is specified a tensor cannot be build by sequences which are not the same size we want to have sequence 0 and sequence 1 are tagged respectively with 0 and 1 token_ids regardless of whether the model use token type ids we use this assumption in the qa pipeline among other place this feature only exists for fast tokenizers test we can use the new tokenizer with something not seen during training we check that the parameters of the tokenizer remained the same check we have the same number of added_tokens for both pair and non pair inputs check we have the correct max_length for both pair and non pair inputs assert the set of special tokens match as we didn t ask to change them this feature only exists for fast tokenizers test with a special tokens map create a new mapping from the special tokens defined in the original tokenizer get the private one to avoid unnecessary warnings train new tokenizer check the changes get the private one to avoid unnecessary warnings check if the addedtoken string format has been kept the special token must appear identically in the list of the new tokenizer the special token must appear in the list of the new tokenizer as an object of type addedtoken with the same parameters as the old addedtoken except the content that the user has requested to change the special token must appear identically in the list of the new tokenizer the special token must appear in the list of the new tokenizer as an object of type string test we can use the new tokenizer with something not seen during training only test prepare_for_model for the slow tokenizer as we don t have a slow version we can t compare the outputs between slow and fast versions rename encoded batch to inputs renaming input_ids to inputs when calling batch_encode with multiple sequences it can return different number of overflowing encoding for each sequence sequence 1 encoding 1 encoding 2 sequence 2 encoding 1 sequence 3 encoding 1 encoding 2 encoding n this needs to be padded so that it can represented as a tensor single example batch of examples for these 2 examples 3 training examples will be created overwrite from test_tokenization_common to speed up test as we don t have a slow version we can t compare the outputs between slow and fast versions checks it save with the same files the tokenizer json file for the fast one checks everything loads correctly in the same way check special tokens are set accordingly on rust and python self assertequal getattr tokenizer_rp key getattr tokenizer_pp key self assertequal getattr tokenizer_rp key _id getattr tokenizer_pp key _id save tokenizer rust legacy_format true checks it save with the same files checks everything loads correctly in the same way check special tokens are set accordingly on rust and python save tokenizer rust legacy_format false checks it saved the tokenizer json file checks everything loads correctly in the same way check special tokens are set accordingly on rust and python test slow tokenizer test fast tokenizer there are 3 cases case 1 document image classification training inference document image token classification inference in which case only 
words and normalized bounding boxes are provided to the tokenizer case 2 document image token classification training in which case one also provides word labels to the tokenizer case 3 document image visual question answering inference in which case one also provides a question to the tokenizer we need to test all 3 cases both on batched and non batched inputs case 1 not batched fmt skip case 1 batched fmt skip case 2 not batched fmt skip case 2 batched fmt skip case 3 not batched fmt skip case 3 batched fmt skip
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import shutil
import tempfile
import unittest
from typing import List

from transformers import (
    AddedToken,
    LayoutXLMTokenizerFast,
    SpecialTokensMixin,
    is_tf_available,
    is_torch_available,
    logging,
)
from transformers.models.layoutxlm.tokenization_layoutxlm import LayoutXLMTokenizer
from transformers.testing_utils import (
    get_tests_dir,
    is_pt_tf_cross_test,
    require_pandas,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import (
    SMALL_TRAINING_CORPUS,
    TokenizerTesterMixin,
    filter_non_english,
    merge_model_tokenizer_mappings,
)


logger = logging.get_logger(__name__)

# We have a SentencePiece fixture for testing.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
@require_pandas
class LayoutXLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutXLMTokenizer
    rust_tokenizer_class = LayoutXLMTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_non_english
    test_seq2seq = False
    test_sentencepiece = True
    maxDiff = None

    def get_words_and_boxes(self):
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]

        return words, boxes

    def get_words_and_boxes_batch(self):
        words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]

        return words, boxes

    def get_question_words_and_boxes(self):
        question = "what's his name?"
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]

        return question, words, boxes

    def get_question_words_and_boxes_batch(self):
        questions = ["what's his name?", "how is he called?"]
        words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]

        return questions, words, boxes

    def setUp(self):
        super().setUp()

        tokenizer = LayoutXLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    # override test in test_tokenization_common.py because of the required input
    # format of the __call__ method of this tokenizer
    def test_save_sentencepiece_tokenizer(self) -> None:
        if not self.test_sentencepiece or not self.test_slow_tokenizer:
            return
        # We want to verify that we will be able to save the tokenizer even if the original files that were used to
        # build the tokenizer have been deleted in the meantime.
        words, boxes = self.get_words_and_boxes()

        tokenizer_slow_1 = self.get_tokenizer()
        encoding_tokenizer_slow_1 = tokenizer_slow_1(
            words,
            boxes=boxes,
        )

        tmpdirname_1 = tempfile.mkdtemp()
        tmpdirname_2 = tempfile.mkdtemp()

        tokenizer_slow_1.save_pretrained(tmpdirname_1)
        tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
        encoding_tokenizer_slow_2 = tokenizer_slow_2(
            words,
            boxes=boxes,
        )

        shutil.rmtree(tmpdirname_1)
        tokenizer_slow_2.save_pretrained(tmpdirname_2)

        tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
        encoding_tokenizer_slow_3 = tokenizer_slow_3(
            words,
            boxes=boxes,
        )
        shutil.rmtree(tmpdirname_2)

        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)
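    # Illustrative sketch (values mirror the fixtures above; not asserted here):
    # LayoutXLM pairs every input word with one normalized [x0, y0, x1, y1]
    # bounding box, and subword tokens inherit the box of the word they come
    # from, e.g.:
    #
    #     words = ["a", "weirdly", "test"]
    #     boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
    #     encoding = tokenizer(words, boxes=boxes)
    #     # len(encoding["bbox"]) == len(encoding["input_ids"]); special tokens
    #     # get fixed boxes such as [0, 0, 0, 0] for <s> and
    #     # [1000, 1000, 1000, 1000] for </s>.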
    def test_split_special_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutxlm-base")
        _, _, boxes = self.get_question_words_and_boxes()
        special_token = "[SPECIAL_TOKEN]"
        tokenizer.add_special_tokens({"additional_special_tokens": [special_token]})
        encoded_special_token = tokenizer.tokenize(special_token, boxes=boxes, add_special_tokens=False)
        self.assertEqual(len(encoded_special_token), 1)

        encoded_split_special_token = tokenizer.tokenize(
            special_token, add_special_tokens=False, split_special_tokens=True, boxes=boxes
        )
        self.assertTrue(len(encoded_split_special_token) > 1)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutxlm-base")

        question, words, boxes = self.get_question_words_and_boxes()

        text = tokenizer.encode(
            question.split(),
            boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
            add_special_tokens=False,
        )
        text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)

        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_pair == [0] + text + [2] + [2] + text_2 + [2]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                words, boxes = self.get_words_and_boxes()
                words[1] = tokenizer_r.mask_token
                tokens = tokenizer_r.encode_plus(
                    words,
                    boxes=boxes,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                expected_results = [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "▁a"),
                    ((0, 6), tokenizer_r.mask_token),
                    ((0, 4), "▁test"),
                    ((0, 0), tokenizer_r.sep_token),
                ]

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_add_special_tokens(self):
        tokenizers: List[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                special_token_box = [1000, 1000, 1000, 1000]

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(
                    [special_token], boxes=[special_token_box], add_special_tokens=False
                )
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
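    # Note on the id layout exercised below: tokens registered through
    # add_tokens / add_special_tokens are appended after the base vocabulary,
    # so encoding one of them yields an id of at least tokenizer.vocab_size,
    # which is what the `> tokenizer.vocab_size - 1` assertions rely on.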
    def test_add_tokens_tokenizer(self):
        tokenizers: List[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]

                tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]

                tokens = tokenizer.encode(
                    words,
                    boxes=boxes,
                    add_special_tokens=False,
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokens[-3])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-2], tokenizer.pad_token_id)

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
                tokenizer.add_tokens(new_toks)
                input = "[ABC][DEF][ABC][DEF]"
                if self.space_between_special_tokens:
                    output = "[ABC] [DEF] [ABC] [DEF]"
                else:
                    output = input
                encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
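    # Worked example for the padding checks below (token ids are illustrative
    # only), with pad_token_id == 1 and padding_size == 2:
    #
    #     input_ids              -> [0, 10, 2]
    #     padding_side = "right" -> [0, 10, 2, 1, 1]
    #     padding_side = "left"  -> [1, 1, 0, 10, 2]
    #
    # The special_tokens_mask grows with 1s on the padded side in the same way.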
    def test_encode_plus_with_padding(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                padding_size = 10
                padding_idx = tokenizer.pad_token_id

                encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
                input_ids = encoded_sequence["input_ids"]
                special_tokens_mask = encoded_sequence["special_tokens_mask"]
                sequence_length = len(input_ids)

                # Test 'longest' and 'no_padding' don't do anything
                tokenizer.padding_side = "right"

                not_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    padding=False,
                    return_special_tokens_mask=True,
                )
                not_padded_input_ids = not_padded_sequence["input_ids"]

                not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
                not_padded_sequence_length = len(not_padded_input_ids)

                self.assertTrue(sequence_length == not_padded_sequence_length)
                self.assertTrue(input_ids == not_padded_input_ids)
                self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)

                not_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    padding=False,
                    return_special_tokens_mask=True,
                )
                not_padded_input_ids = not_padded_sequence["input_ids"]

                not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
                not_padded_sequence_length = len(not_padded_input_ids)

                self.assertTrue(sequence_length == not_padded_sequence_length)
                self.assertTrue(input_ids == not_padded_input_ids)
                self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)

                # Test right padding
                tokenizer.padding_side = "right"

                right_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    max_length=sequence_length + padding_size,
                    padding="max_length",
                    return_special_tokens_mask=True,
                )
                right_padded_input_ids = right_padded_sequence["input_ids"]

                right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
                right_padded_sequence_length = len(right_padded_input_ids)

                self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
                self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
                self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)

                # Test left padding
                tokenizer.padding_side = "left"
                left_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    max_length=sequence_length + padding_size,
                    padding="max_length",
                    return_special_tokens_mask=True,
                )
                left_padded_input_ids = left_padded_sequence["input_ids"]
                left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
                left_padded_sequence_length = len(left_padded_input_ids)

                self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
                self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
                self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)

                if "token_type_ids" in tokenizer.model_input_names:
                    token_type_ids = encoded_sequence["token_type_ids"]
                    left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
                    right_padded_token_type_ids = right_padded_sequence["token_type_ids"]

                    assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
                    assert [0] * padding_size + token_type_ids == left_padded_token_type_ids

                if "attention_mask" in tokenizer.model_input_names:
                    attention_mask = encoded_sequence["attention_mask"]
                    right_padded_attention_mask = right_padded_sequence["attention_mask"]
                    left_padded_attention_mask = left_padded_sequence["attention_mask"]

                    self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
                    self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                tokens = []
                for word in words:
                    tokens.extend(tokenizer.tokenize(word))
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                output_text = "a weirdly test"
                self.assertEqual(text_2, output_text)

    def test_mask_output(self):
        tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                if (
                    tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
                    and "token_type_ids" in tokenizer.model_input_names
                ):
                    information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
                    sequences, mask = information["input_ids"], information["token_type_ids"]
                    self.assertEqual(len(sequences), len(mask))
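    # For reference: per test_sequence_builders above, a pair is built as
    # <s> A </s></s> B </s> and a single sequence as <s> A </s>, so
    # num_special_tokens_to_add should report 4 for pairs and 2 otherwise;
    # the test below only checks the count against the encoded lengths.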
    def test_number_of_added_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # test 1: single sequence
                words, boxes = self.get_words_and_boxes()

                sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)

                # Method is implemented (e.g. not GPT-2)
                if len(attached_sequences) != 2:
                    self.assertEqual(
                        tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
                    )

                # test 2: two sequences
                question, words, boxes = self.get_question_words_and_boxes()

                sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
                attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)

                # Method is implemented (e.g. not GPT-2)
                if len(attached_sequences) != 2:
                    self.assertEqual(
                        tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
                    )

    def test_padding_to_max_length(self):
        """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` is deprecated"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                padding_idx = tokenizer.pad_token_id

                # Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                # FIXME: the next line should be padding(max_length) to avoid warning
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

                # Check that nothing is done when a maximum length is not specified
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right
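    # The cross-checks below lean on two equivalences of the padding API:
    # padding=True is shorthand for padding="longest", and the legacy
    # pad_to_max_length=True spells padding="max_length"; slow (Python) and
    # fast (Rust) tokenizers are expected to agree under every spelling.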
    def test_padding(self, max_length=50):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
                pad_token_id = tokenizer_p.pad_token_id

                # Encode - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
                input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode_plus - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Encode_plus - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Batch_encode_plus - Simple input
                words, boxes = self.get_words_and_boxes_batch()

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    pad_to_max_length=True,
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    pad_to_max_length=True,
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="longest",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding=True,
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Batch_encode_plus - Pair input
                questions, words, boxes = self.get_question_words_and_boxes_batch()

                input_r = tokenizer_r.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding=True,
                )
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding="longest",
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad on single examples after tokenization
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r)

                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p)

                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )

                # Using pad on single examples after tokenization
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")

                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer_r.pad(input_r)

                input_p = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_r.pad(input_p)

                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

                input_p = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")

                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

    def test_padding_warning_message_fast_tokenizer(self):
        if not self.test_rust_tokenizer:
            return

        words, boxes = self.get_words_and_boxes_batch()

        tokenizer_fast = self.get_rust_tokenizer()

        encoding_fast = tokenizer_fast(
            words,
            boxes=boxes,
        )

        with self.assertLogs("transformers", level="WARNING") as cm:
            tokenizer_fast.pad(encoding_fast)
        self.assertEqual(len(cm.records), 1)
        self.assertIn(
            "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to"
            " encode the text followed by a call to the `pad` method to get a padded encoding.",
            cm.records[0].message,
        )

        if not self.test_slow_tokenizer:
            return

        tokenizer_slow = self.get_tokenizer()

        encoding_slow = tokenizer_slow(
            words,
            boxes=boxes,
        )

        with self.assertLogs(level="WARNING") as cm:
            # We want to assert there are no warnings, but the 'assertLogs' method does not support that.
            # Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
            logger.warning("Dummy warning")
            tokenizer_slow.pad(encoding_slow)
        self.assertEqual(len(cm.records), 1)
        self.assertIn(
            "Dummy warning",
            cm.records[0].message,
        )
boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) words, boxes = self.get_words_and_boxes_batch() encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) def test_batch_encode_plus_batch_sequence_length(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() encoded_sequences = [ tokenizer.encode_plus(words_example, boxes=boxes_example) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) maximum_length = len( max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) ) self._check_no_pad_token_padding(tokenizer, words) encoded_sequences_padded = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch_padded = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) self.assertListEqual( encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), ) encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest" ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=False ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) @unittest.skip("batch_encode_plus does not handle overflowing tokens.") def test_batch_encode_plus_overflowing_tokens(self): pass def test_batch_encode_plus_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() max_length = 100 self._check_no_pad_token_padding(tokenizer, words) encoded_sequences = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.padding_side = "left" words, boxes = self.get_words_and_boxes_batch() max_length = 100 self._check_no_pad_token_padding(tokenizer, 
words) encoded_sequences = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") else: words, boxes = self.get_words_and_boxes() normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") self.assertRaises( ValueError, tokenizer.__call__, words, boxes=boxes, padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_tokenizer_slow_store_full_signature(self): signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_build_inputs_with_special_tokens(self): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False) input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False) output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) self.assertEqual(output_p, output_r) output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_special_tokens_mask_input_pairs(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in 
filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) def test_special_tokens_mask(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] self.assertEqual(encoded_sequence, filtered_sequence) def test_save_and_load_tokenizer(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() tmpdirname = tempfile.mkdtemp() before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) @unittest.skip("Not implemented") def test_right_and_left_truncation(self): pass def test_right_and_left_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() sequence = "Sequence" padding_size = 10 self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id tokenizer.padding_side = "right" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert encoded_sequence + [padding_idx] * padding_size == padded_sequence tokenizer.padding_side = "left" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert [padding_idx] * padding_size + encoded_sequence == padded_sequence encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest") padded_sequence_left_length 
= len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False) padded_sequence_left_length = len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left def test_token_type_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() output = tokenizer(words, boxes=boxes, return_token_type_ids=True) self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertNotIn(1, output["token_type_ids"]) question, words, boxes = self.get_question_words_and_boxes() output = tokenizer(question, words, boxes, return_token_type_ids=True) self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertNotIn(1, output["token_type_ids"]) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(text))] tokens_with_offsets = tokenizer_r.encode_plus( text, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) text = "what's his name" pair = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(pair))] tokens_with_offsets = tokenizer_r.encode_plus( text, pair, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return model = model_class(config) is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") assert ( 
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True ) words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt") batch_encoded_sequence = tokenizer.batch_encode_plus( [words, words], [boxes, boxes], return_tensors="pt" ) with torch.no_grad(): model(**encoded_sequence) model(**batch_encoded_sequence) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return if not self.test_slow_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() words, boxes = self.get_words_and_boxes() ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True) rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True) self.assertListEqual(ids, rust_ids) def test_tokenization_python_rust_equals(self): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() input_p = tokenizer_p.encode_plus(words, boxes=boxes) input_r = tokenizer_r.encode_plus(words, boxes=boxes) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key]) input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes) input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key]) words = ["hello" for _ in range(1000)] boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)] input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True) input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key]) input_p = tokenizer_p.encode_plus( words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) input_r = tokenizer_r.encode_plus( words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key][0]) def test_embeded_special_tokens(self): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() tokens_r = tokenizer_r.encode_plus( words, boxes=boxes, add_special_tokens=True, ) tokens_p = tokenizer_p.encode_plus( words, boxes=boxes, add_special_tokens=True, ) for key in tokens_p.keys(): self.assertEqual(tokens_r[key], tokens_p[key]) if 
"token_type_ids" in tokens_r: self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_r, tokens_p) def test_compare_add_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) words, boxes = self.get_words_and_boxes() no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False) with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens.keys(): self.assertEqual( len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add, ) words, boxes = self.get_words_and_boxes_batch() no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens.keys(): for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) @slow def test_layoutxlm_truncation_integration_test(self): words, boxes = self.get_words_and_boxes() tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", model_max_length=512) for i in range(12, 512): new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True) self.assertLessEqual(len(new_encoded_inputs), i) tokenizer.model_max_length = 20 new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs) self.assertLessEqual(len(new_encoded_inputs), 20) @is_pt_tf_cross_test def test_batch_encode_plus_tensors(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt") self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf") if tokenizer.pad_token_id is None: self.assertRaises( ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding=True, return_tensors="pt", ) self.assertRaises( ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding="longest", return_tensors="tf", ) else: pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt") tensorflow_tensor = tokenizer.batch_encode_plus( words, boxes=boxes, 
padding="longest", return_tensors="tf" ) encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True) for key in encoded_sequences.keys(): pytorch_value = pytorch_tensor[key].tolist() tensorflow_value = tensorflow_tensor[key].numpy().tolist() encoded_value = encoded_sequences[key] self.assertEqual(pytorch_value, tensorflow_value, encoded_value) def test_sequence_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." seq_1 = ["With", "these", "inputs."] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))] output = tokenizer(seq_0.split(), boxes=boxes) self.assertIn(0, output.sequence_ids()) output = tokenizer(seq_0, seq_1, boxes=boxes) self.assertIn(0, output.sequence_ids()) self.assertIn(1, output.sequence_ids()) if tokenizer.num_special_tokens_to_add(pair=True): self.assertIn(None, output.sequence_ids()) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) words = "Hey this is a <special> token".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] r_output = tokenizer_r.encode(words, boxes=boxes) special_token_id = tokenizer_r.encode( ["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False )[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) words = "Hey this is a <special> token".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] p_output = tokenizer_p.encode(words, boxes=boxes) cr_output = tokenizer_cr.encode(words, boxes=boxes) self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) def test_training_new_tokenizer(self): if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) text = [["this", "is", "the"], ["how", "are", "you"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]] inputs = new_tokenizer(text, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is the" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) self.assertSequenceEqual( tokenizer.all_special_tokens_extended, 
new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") special_tokens_map = {} for token in special_tokens_list: if getattr(tokenizer, f"_{token}") is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) for token in special_tokens_list: if getattr(tokenizer, f"_{token}") is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) for special_token in tokenizer.all_special_tokens_extended: if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) elif isinstance(special_token, AddedToken): special_token_str = special_token.content new_special_token_str = special_tokens_map[special_token_str] find = False for candidate in new_tokenizer.all_special_tokens_extended: if ( isinstance(candidate, AddedToken) and candidate.content == new_special_token_str and candidate.lstrip == special_token.lstrip and candidate.rstrip == special_token.rstrip and candidate.normalized == special_token.normalized and candidate.single_word == special_token.single_word ): find = True break self.assertTrue( find, f"'{new_special_token_str}' doesn't appear in the list " f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as " f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}", ) elif special_token not in special_tokens_map: self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) else: self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended) words = [["this", "is"], ["hello", "🤗"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]] inputs = new_tokenizer(words, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: if 
tokenizer.__class__.__name__ == "LayoutXLMTokenizerFast": continue with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True) input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict) def test_padding_different_model_input_name(self): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes) input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes) input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]] del input_r[tokenizer_r.model_input_names[0]] input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]] del input_p[tokenizer_p.model_input_names[0]] tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:] tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:] input_r = tokenizer_r.pad(input_r, padding="longest") input_p = tokenizer_r.pad(input_p, padding="longest") max_length = len(input_p["inputs"][0]) self.assert_batch_padded_input_match( input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs" ) def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" else: returned_tensor = "jax" words, boxes = self.get_words_and_boxes() tokens = tokenizer.encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) else: self.assertEqual(len(tokens[key].shape), 3) words, boxes = self.get_words_and_boxes_batch() tokens = tokenizer.batch_encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) else: self.assertEqual(len(tokens[key].shape), 3) self.assertEqual(tokens[key].shape[-1], 4) def test_save_pretrained(self): if not self.test_slow_tokenizer: return self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-layoutxlm", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = 
tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @unittest.skip("TO DO: overwrite this very extensive test.") def test_alignement_methods(self): pass @unittest.skip("layoutxlm tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_pair_input(self): pass @unittest.skip("layoutxlm tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_single_input(self): pass @unittest.skip("layoutxlm tokenizer requires boxes besides sequences.") def test_pretokenized_inputs(self): pass @unittest.skip("layoutxlm tokenizer always expects pretokenized inputs.") def test_compare_pretokenized_inputs(self): pass @unittest.skip("layoutxlm fast tokenizer does not support prepare_for_model") def test_compare_prepare_for_model(self): pass @slow def test_only_label_first_subword(self): words = ["hello", "niels"] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] word_labels = [0, 1] tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, -100, 1, -100, -100]) tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", only_label_first_subword=False) encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 0, 1, 1, -100]) tokenizer_r = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base") encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, -100, 1, -100, -100]) tokenizer_r = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base", only_label_first_subword=False) encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 0, 1, 1, -100]) @slow def test_layoutxlm_integration_test(self): tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") tokenizer_r = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base") words, boxes = self.get_words_and_boxes() expected_results =
{'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) words, boxes = self.get_words_and_boxes_batch() expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) words, boxes = self.get_words_and_boxes() word_labels = [1, 2, 3] expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 
1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) question, words, boxes = self.get_question_words_and_boxes() expected_results = {'input_ids': [0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]} encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) questions, words, boxes = self.get_question_words_and_boxes_batch() expected_results = {'input_ids': [[0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], [0, 3642, 83, 764, 35839, 32, 2, 2, 2367, 10, 21, 3190, 53496, 19, 2, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]} encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), 
expected_results) @unittest.skip("Doesn't support another framework than PyTorch") def test_np_encode_plus_sent_to_model(self): pass @unittest.skip("Doesn't use SentencePiece") def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): pass @unittest.skip("Doesn't use SentencePiece") def test_sentencepiece_tokenize_and_decode(self): pass @unittest.skip("Chat is not supported") def test_chat_template(self): pass
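# NOTE (illustrative sketch, not taken from the test file): every test above drives the
# LayoutXLM tokenizers through the same words/boxes interface, against the checkpoint
# the slow integration tests load. The words and boxes here are hypothetical; each box
# is a normalized [x0, y0, x1, y1] on a 0-1000 scale, one per word. Assumes the
# "microsoft/layoutxlm-base" checkpoint is downloadable.
def _demo_layoutxlm_tokenization():
    from transformers import LayoutXLMTokenizerFast

    tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
    words = ["hello", "niels"]
    boxes = [[423, 237, 440, 251], [427, 272, 441, 287]]

    encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=10)
    # the encoding carries one bounding box per input id ("bbox"); special and padding
    # tokens receive dummy boxes, mirroring the expected_results dictionaries above
    return encoding["input_ids"], encoding["bbox"]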
# coding=utf-8
# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LED model. """


import copy
import tempfile
import unittest

from transformers import LEDConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        LEDForConditionalGeneration,
        LEDForQuestionAnswering,
        LEDForSequenceClassification,
        LEDModel,
        LEDTokenizer,
    )
    from transformers.models.led.modeling_led import LEDDecoder, LEDEncoder


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
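# NOTE (illustrative sketch, not part of the original test suite): the helper above
# derives every mask it is not given. A tiny standalone example of the padding-mask
# rule, with a hypothetical one-sample batch:
def _demo_default_attention_mask():
    import torch

    pad_token_id = 1  # matches LEDModelTester's default below
    input_ids = torch.tensor([[0, 5, 7, 2, 1, 1]])
    attention_mask = input_ids.ne(pad_token_id)
    # -> tensor([[ True,  True,  True,  True, False, False]]): the two trailing pad
    # positions are masked out, everything else is attended
    return attention_mask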
class LEDModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=11,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1],
        # because its local attention only attends to `self.attention_window + 1` locations
        # (assuming no token with global attention; otherwise the last dimension of attentions
        # is x + self.attention_window + 1, where x is the number of tokens with global attention).
        # x is set to 1 here, since the common inputs mark one token as global.
        self.encoder_key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` can differ from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = self.seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return LEDConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            attention_window=self.attention_window,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.max_position_embeddings = 100
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        global_attention_mask = torch.zeros_like(inputs_dict["input_ids"])
        global_attention_mask[:, -1] = 1
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = LEDModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = LEDModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = LEDEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"],
            attention_mask=inputs_dict["attention_mask"],
            global_attention_mask=inputs_dict["global_attention_mask"],
        )[0]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = LEDDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)

    def check_global_attention(self, config, inputs_dict):
        model = LEDModel(config=config).to(torch_device).eval()
        model.config.output_attentions = True
        attention_mask = ids_tensor(inputs_dict["input_ids"].shape, vocab_size=2)
        global_attention_mask = torch.zeros_like(attention_mask)

        # set some tokens to global attention
        num_tokens_with_global_attention = 2

        attention_mask[:, 2 : 2 + num_tokens_with_global_attention] = 1
        global_attention_mask[:, 2 : 2 + num_tokens_with_global_attention] = 1
        inputs_dict["attention_mask"] = attention_mask
        inputs_dict["global_attention_mask"] = global_attention_mask

        outputs = model(**inputs_dict)
        self.parent.assertIsNotNone(outputs.encoder_global_attentions)

        # marking `num_tokens_with_global_attention` tokens as global makes the last
        # dimension of the global attentions equal to `num_tokens_with_global_attention`
        self.parent.assertEqual(
            outputs.encoder_global_attentions[0].shape,
            (self.batch_size, self.num_attention_heads, self.encoder_seq_length, num_tokens_with_global_attention),
        )
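# NOTE (illustrative sketch): the shape bookkeeping in `LEDModelTester.__init__` is
# easiest to see with its own defaults plugged in:
def _demo_expected_attention_shape():
    attention_window = 4  # tester default
    seq_length = 11  # tester default
    num_global_tokens = 1  # `prepare_config_and_inputs_for_common` marks the last token global

    # local attention covers `attention_window + 1` positions per query token,
    # plus one extra slot per globally-attending token
    encoder_key_length = attention_window + 1 + num_global_tokens  # == 6
    encoder_seq_length = seq_length  # no extra padding with these defaults

    # so `test_attention_outputs` checks encoder attentions of shape
    # [num_attention_heads, 11, 6] instead of the usual [num_heads, 11, 11]
    return encoder_seq_length, encoder_key_length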
@require_torch
class LEDModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LEDModel, LEDForConditionalGeneration, LEDForSequenceClassification, LEDForQuestionAnswering)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (LEDForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": LEDForConditionalGeneration,
            "feature-extraction": LEDModel,
            "question-answering": LEDForQuestionAnswering,
            "summarization": LEDForConditionalGeneration,
            "text-classification": LEDForSequenceClassification,
            "text2text-generation": LEDForConditionalGeneration,
            "translation": LEDForConditionalGeneration,
            "zero-shot": LEDForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_missing_keys = False
    test_torchscript = False

    # TODO: Fix the failed tests when this model gets more usage
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
            return True

        return False

    def setUp(self):
        self.model_tester = LEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_global_attention(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_global_attention(*config_and_inputs)

    # LEDForSequenceClassification does not support inputs_embeds
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (LEDModel, LEDForConditionalGeneration, LEDForQuestionAnswering):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = LEDForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

    def test_retain_grad_hidden_states_attentions(self):
        # longformer cannot keep gradients in attentions or hidden states
        return
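# NOTE (illustrative sketch): the global-attention pattern `check_global_attention`
# builds, shown standalone with small hypothetical dimensions:
def _demo_global_attention_mask():
    import torch

    batch_size, seq_length = 2, 11
    num_tokens_with_global_attention = 2

    global_attention_mask = torch.zeros(batch_size, seq_length, dtype=torch.long)
    global_attention_mask[:, 2 : 2 + num_tokens_with_global_attention] = 1
    # each row is [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0]; tokens 2 and 3 attend to (and are
    # attended by) every position, so `encoder_global_attentions` gets a last
    # dimension of size 2
    return global_attention_mask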
modelclass if not self isencoderdecoder inputids inputsinputids del inputsinputids else encoderinputids inputsinputids decoderinputids inputs getdecoderinputids encoderinputids del inputsinputids inputs popdecoderinputids none wte model getinputembeddings if not self isencoderdecoder inputsinputsembeds wteinputids else inputsinputsembeds wteencoderinputids inputsdecoderinputsembeds wtedecoderinputids with torch nograd modelinputs0 requiretorchfp16 def testgeneratefp16self config inputdict self modeltester prepareconfigandinputs inputids inputdictinputids attentionmask inputids ne1 totorchdevice model ledforconditionalgenerationconfig eval totorchdevice model half model generateinputids attentionmaskattentionmask model generatenumbeams4 dosampletrue earlystoppingfalse numreturnsequences3 def testretaingradhiddenstatesattentionsself longformer cannot keep gradients in attentions or hidden states return def testattentionoutputsself config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true seqlength self modeltester seqlength encoderseqlength self modeltester encoderseqlength encoderkeylength self modeltester encoderkeylength for modelclass in self allmodelclasses inputsdictoutputattentions true inputsdictoutputhiddenstates false config returndict true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions self assertequallenattentions self modeltester numhiddenlayers check that outputattentions also work using config del inputsdictoutputattentions config outputattentions true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions self assertequallenattentions self modeltester numhiddenlayers self assertlistequal listattentions0 shape3 self modeltester numattentionheads encoderseqlength encoderkeylength outlen lenoutputs global attention outputs are added as well so 1 here correctoutlen 6 loss is at first position if labels in inputsdict correctoutlen 1 loss is added to beginning question answering model returns startlogits and endlogits if modelclass in getvaluesmodelforquestionansweringmapping correctoutlen 1 startlogits and endlogits instead of only 1 output if pastkeyvalues in outputs correctoutlen 1 pastkeyvalues have been returned self assertequaloutlen correctoutlen decoder attentions decoderattentions outputs decoderattentions self assertisinstancedecoderattentions list tuple self assertequallendecoderattentions self modeltester numhiddenlayers self assertlistequal listdecoderattentions0 shape3 self modeltester numattentionheads seqlength seqlength cross attentions crossattentions outputs crossattentions self assertisinstancecrossattentions list tuple self assertequallencrossattentions self modeltester numhiddenlayers self assertlistequal listcrossattentions0 shape3 self modeltester numattentionheads seqlength seqlength def asserttensorsclosea b atol1e12 prefix all the below results were obtained with the original checkpoints and code base from https github comallenailongformer important note that the original checkpoints include a postionembeddings hack and have to be cut to have the correct shape see https github comhuggingfacetransformerspull9278issue544709661 change to intended input change to expected output here change to intended input change to expected output here this test requires 16gb of ram coding utf 8 2021 iz beltagy matthew 
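The tester methods above revolve around LED's `global_attention_mask` convention (zeros everywhere, ones on the few tokens that attend globally). For context, a minimal end-to-end sketch of that convention; the model name and input text are placeholders, while the calls themselves are standard `transformers` usage:

import torch
from transformers import LEDForConditionalGeneration, LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

inputs = tokenizer("A very long document ...", return_tensors="pt")

# LED expects global attention on at least one token; by convention the first
# (<s>) token attends globally, mirroring the masks built in the tests above.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    max_length=64,
    num_beams=2,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))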
# coding=utf-8
# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LED model."""

The LEP experiments at the resonance of the @xmath1 boson have tested the Standard Model (SM) at the quantum level, measuring the @xmath1 decay into fermion pairs with an accuracy of one part in ten thousand. The good agreement of the LEP data with the SM predictions has severely constrained the behavior of new physics at the @xmath1 pole. Taking these achievements into account, one can imagine that the physics of the @xmath1 boson will again play a central role at the frontier of particle physics if a next-generation @xmath1 factory, producing @xmath1 events several orders of magnitude more numerous than at LEP, comes true. Such a factory can be realized in the GigaZ option of the International Linear Collider (ILC) @xcite. The ILC is a proposed electron-positron collider with tunable energy ranging from @xmath12 to @xmath13 and polarized beams.
In its first phase, the GigaZ option corresponds to operation on top of the @xmath1 resonance by adding a bypass to its main beam line. Given the high luminosity, @xmath14, and the cross section at the @xmath1 resonance, @xmath15, about @xmath16 @xmath1 events can be generated in an operational year of @xmath17 of GigaZ, which implies that the expected sensitivity to the branching ratio of @xmath1 decays can be improved from @xmath18 at LEP to @xmath19 at GigaZ @xcite. In light of this, the properties of the @xmath1 boson, especially its exotic or rare decays, which are widely believed to be sensitive to new physics, should be investigated comprehensively to evaluate their potential in probing new physics. Among the rare @xmath1 decays, the flavor-changing (FC) processes have been studied most extensively, to explore the flavor texture of new physics @xcite, and it was found that, although these processes are severely suppressed in the SM, their branching ratios can be greatly enhanced in new-physics models, to @xmath19 for lepton-flavor-violating decays @xcite and @xmath20 for quark-flavor-violating decays @xcite. Besides the FC processes, @xmath1 decay into light Higgs boson(s) is another type of rare process that has been widely studied: e.g., the decay @xmath21 (@xmath22), with the particle @xmath0 denoting a light Higgs boson, was studied in @xcite; the decay @xmath23 was studied in the two-Higgs-doublet model (2HDM) @xcite and the minimal supersymmetric standard model (MSSM) @xcite; and the decay @xmath4 was studied in a model-independent way @xcite, in the 2HDM @xcite, and also in the MSSM @xcite. These studies indicate that, in contrast with the kinematic forbiddance of these decays in the SM, their rates can be as large as @xmath18 in new-physics models, which lies within the expected sensitivity of GigaZ. In this work we extend the previous studies of these decays to some new models and investigate the decays together. We are motivated by recent studies of singlet extensions of the MSSM, such as the next-to-minimal supersymmetric standard model (NMSSM) @xcite and the nearly-minimal supersymmetric standard model (nMSSM) @xcite, in which a light CP-odd Higgs boson @xmath0 with a singlet-dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry, such as a @xmath24 or Peccei-Quinn symmetry @xcite. These non-minimal supersymmetric models can not only avoid the @xmath25 problem but also alleviate the little hierarchy by having such a light Higgs boson @xmath0 @xcite. We are also motivated by the fact that, with the latest experiments, the properties of a light Higgs boson are more stringently constrained than before, so it is worth updating the previous studies. So far there is no model-independent lower bound on the mass of the lightest Higgs boson. In the SM it must be heavier than @xmath26 GeV, as obtained from the null observation of the Higgs boson in the LEP experiments; however, due to the more complex structure of the Higgs sector in extensions of the SM, this lower bound can be significantly relaxed. According to recent studies, e.g., for the CP-odd Higgs boson @xmath0 we have @xmath27 GeV in the NMSSM @xcite, @xmath28 GeV in the nMSSM @xcite, and @xmath29 GeV in the lepton-specific 2HDM (L2HDM) @xcite. With such a light CP-odd Higgs boson, @xmath1 decay into one or more @xmath0 opens up. Noting that the decay @xmath30 is forbidden by Bose symmetry, we study in this work the rare @xmath1 decays @xmath6 (@xmath22), @xmath31 and @xmath4 in a comparative way for four models, namely the type-II 2HDM @xcite, the L2HDM @xcite, the NMSSM, and the nMSSM.
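For orientation, the GigaZ event yield quoted above follows from the standard luminosity relation; the numerical values (@xmath14, @xmath15, @xmath16) are elided in this dump, so only the scaling is shown:

\[
N_Z \;=\; \sigma(e^+e^- \to Z)\,\int \mathcal{L}\,dt ,
\qquad
\frac{\delta\,\mathrm{BR}}{\mathrm{BR}} \;\sim\; \frac{1}{\sqrt{N_Z\,\mathrm{BR}}} ,
\]

so several orders of magnitude more @xmath1 events translate directly into the quoted improvement in branching-ratio sensitivity.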
In our study, we examine carefully the constraints on the light @xmath0 from the latest experimental results. This work is organized as follows: in Sec. II we briefly describe the four new-physics models; in Sec. III we present the calculations of the rare @xmath1 decays; in Sec. IV we list the constraints on the four models; in Sec. V we show the numerical results for the branching ratios of the rare @xmath1 decays in the various models; finally, the conclusion is given in the last section. As the most economical option, the SM utilizes one Higgs doublet to break the electroweak symmetry; as a result, it predicts only one physical Higgs boson, with properties totally determined by two free parameters. In new-physics models, the Higgs sector is usually extended by adding Higgs doublets and/or singlets, and consequently more physical Higgs bosons are predicted, along with more free parameters. The general 2HDM contains two @xmath32 doublet Higgs fields, @xmath33 and @xmath34, and with the assumption of CP conservation its scalar potential can be parameterized as @xcite @xmath35, where @xmath36 (@xmath37) are free dimensionless parameters and @xmath38 (@xmath39) are parameters with mass dimension. After electroweak symmetry breaking, the spectrum of this Higgs sector includes three massless Goldstone modes, which become the longitudinal modes of the @xmath40 and @xmath1 bosons, and five massive physical states: two CP-even Higgs bosons @xmath41 and @xmath42, one neutral CP-odd Higgs particle @xmath0, and a pair of charged Higgs bosons @xmath43. Noting the constraint @xmath44, with @xmath45 and @xmath46 denoting the vacuum expectation values (VEVs) of @xmath33 and @xmath34 respectively, we choose @xmath47 as the input parameters, with @xmath48 and @xmath49 being the mixing angle that diagonalizes the mass matrix of the CP-even Higgs fields. The difference between the type-II 2HDM and the L2HDM comes from the Yukawa couplings of the Higgs bosons to quarks/leptons. In the type-II 2HDM, one Higgs doublet, @xmath34, generates the masses of the up-type quarks, and the other doublet, @xmath33, generates the masses of the down-type quarks and charged leptons, while in the L2HDM one Higgs doublet, @xmath33, couples only to leptons and the other doublet, @xmath34, couples only to quarks. So the Yukawa interactions of @xmath0 with fermions in these two models are given by @xcite @xmath50, with @xmath51 denoting the generation index. Obviously, in the type-II 2HDM the @xmath52 coupling and the @xmath53 coupling can be simultaneously enhanced by @xmath54, while in the L2HDM only the @xmath53 coupling is enhanced by @xmath55. The structures of the NMSSM and the nMSSM are described by their superpotentials and corresponding soft-breaking terms, which are given by @xcite @xmath56, where @xmath57 is the superpotential of the MSSM without the @xmath25 term, @xmath58 and @xmath59 are Higgs doublet and singlet superfields with @xmath60 and @xmath61 being their scalar components respectively, @xmath62, @xmath63, @xmath64, @xmath65, @xmath66 and @xmath67 are soft-breaking parameters, and @xmath68 and @xmath69 are coefficients of the Higgs self-interactions. With the superpotentials and the soft-breaking terms, one can obtain the Higgs potentials of the NMSSM and the nMSSM respectively. As in the 2HDM, the Higgs bosons with the same CP property mix, and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices, @xmath70, where the fields on the right-hand sides of the equations are the component fields of @xmath71, @xmath72 and @xmath61, defined by @xmath73.
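The elided superpotential @xmath56 cannot be recovered from this dump, but the form commonly used in the literature, matching the description above (the MSSM superpotential without the @xmath25 term plus singlet couplings and the corresponding soft terms), reads, as a hedged reconstruction with the standard symbol names \(\lambda, \kappa, A_\lambda, A_\kappa, m_S\):

\[
W \;=\; W_{\rm MSSM}^{(\mu=0)} \;+\; \lambda\,\hat{S}\,\hat{H}_u\!\cdot\!\hat{H}_d \;+\; \frac{\kappa}{3}\,\hat{S}^3 ,
\qquad
-\mathcal{L}_{\rm soft} \;\supset\; m_S^2\,|S|^2 \;+\; \Bigl(\lambda A_\lambda\,S\,H_u\!\cdot\!H_d \;+\; \frac{\kappa}{3}A_\kappa\,S^3 \;+\; {\rm h.c.}\Bigr) .
\]

In the nMSSM the cubic singlet term is replaced by a tadpole term linear in \(\hat{S}\), and in both models an effective \(\mu\) parameter is generated as \(\mu_{\rm eff} = \lambda\langle S\rangle\), consistent with the discussion below.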
Here @xmath74 and @xmath75 are respectively the CP-even and CP-odd neutral Higgs bosons, @xmath76 and @xmath77 are the Goldstone bosons eaten by the @xmath1 and @xmath78, and @xmath79 is the charged Higgs boson. So both the NMSSM and the nMSSM predict three CP-even Higgs bosons, two CP-odd Higgs bosons, and one pair of charged Higgs bosons. In general, the lighter CP-odd Higgs @xmath0 in these models is a mixture of the singlet field @xmath80 and the doublet field combination @xmath81, i.e. @xmath82, and its couplings to down-type quarks are then proportional to @xmath83. So, for a singlet-dominated @xmath0, @xmath84 is small and the couplings are suppressed. By comparison, the interactions of @xmath0 with squarks are given by @xcite @xmath85, i.e. the interaction does not vanish when @xmath86 approaches zero. Just as in the 2HDM, where we use the VEVs of the Higgs fields as fundamental parameters, we choose @xmath68, @xmath69, @xmath87, @xmath88, @xmath66 and @xmath89 as input parameters for the NMSSM @xcite, and @xmath68, @xmath54, @xmath88, @xmath65, @xmath90 and @xmath91 as input parameters for the nMSSM @xcite. About the NMSSM and the nMSSM, three points should be noted. The first is that in the two models there is no explicit @xmath92 term, and the effective @xmath25 parameter (@xmath93) is generated when the scalar component of @xmath59 develops a VEV. The second is that the nMSSM is actually the same as the NMSSM with @xmath94 @xcite, because the tadpole term @xmath95 and its soft-breaking term @xmath96 in the nMSSM do not induce any interactions except for the tree-level Higgs boson masses and the minimization conditions. The last is that, despite the similarities, the nMSSM has its own peculiarity, which comes from its neutralino sector: in the basis @xmath97, its neutralino mass matrix is given by @xcite @xmath98, where @xmath99 and @xmath100 are the @xmath101 and @xmath102 gaugino masses respectively, @xmath103, @xmath104, @xmath105 and @xmath106. After diagonalizing this matrix, one can obtain the mass eigenstate of the lightest neutralino @xmath107, with mass taking the following form @xcite: @xmath108. This expression implies that @xmath107 must be lighter than about @xmath109 GeV for @xmath110 (from the lower bound on the chargino mass) and @xmath111 (perturbativity bound). As in other supersymmetric models, @xmath107, being the lightest sparticle, acts as the dark matter in the universe; but due to its singlino-dominated nature, it is difficult for it to annihilate sufficiently to yield the correct density in the current universe. So the relic density of @xmath107 plays a crucial role in selecting the model parameters. For example, as shown in @xcite, for @xmath112 there is no way to obtain the correct relic density, while in the other cases @xmath107 annihilates mainly via @xmath1-boson exchange for @xmath113, or via exchange of a light CP-odd Higgs boson @xmath0 with mass satisfying the relation @xmath114 for @xmath115. For the annihilation, @xmath54 and @xmath25 are required to be less than 10 and @xmath116 respectively, because, through Eq. (mass-exp), a large @xmath87 or @xmath25 will suppress @xmath117 and make the annihilation more difficult. The properties of the lightest CP-odd Higgs boson @xmath0, such as its mass and couplings, are also tightly limited, since @xmath0 plays an important role in @xmath107 annihilation. The phenomenology of the nMSSM is also rather special, and this was discussed in detail in @xcite. In the type-II 2HDM, L2HDM, NMSSM and nMSSM, the rare @xmath1 decays @xmath118 (@xmath22), @xmath3 and @xmath4 may proceed via the Feynman diagrams shown in Fig. [fig1], Fig. [fig2] and Fig. [fig3] respectively. In these diagrams, the intermediate state @xmath119 represents all possible CP-even Higgs bosons in the corresponding model, i.e. @xmath41 and @xmath42 in the type-II 2HDM
and L2HDM, and @xmath41, @xmath42 and @xmath120 in the NMSSM and nMSSM. In order to take into account the possible resonance effects of @xmath119 in Fig. [fig1](c) for @xmath2 and Fig. [fig3](a) for @xmath11, we have calculated all the decay modes of @xmath119 and properly included the width effect in its propagator. As for the decay @xmath121, two points should be noted. One is that, unlike the decays @xmath6 and @xmath11, this process proceeds only through loops mediated by quarks/leptons in the type-II 2HDM and L2HDM, and additionally by sparticles in the NMSSM and nMSSM, so in most cases its rate should be much smaller than the other two. The other is that, due to CP invariance, loops mediated by squarks/sleptons give no contribution to the decay @xcite. In the actual calculation, this is reflected by the fact that the coupling coefficient of @xmath122 differs from that of @xmath123 by a minus sign (see Eq. (asqsq)), and as a result the squark-mediated contributions to @xmath121 cancel out completely. With regard to the rare decay @xmath11, we add more explanation. At the lowest order, this decay proceeds via the diagram shown in Fig. [fig3](a), and hence one may think that, as a rough estimate, it is enough to consider only the contributions from Fig. [fig3](a). However, we note that in some cases of the type-II 2HDM and L2HDM, due to the cancellation of the contributions from different @xmath119 in Fig. [fig3](a) and also due to the potential largeness of the @xmath124 couplings (i.e., larger than the electroweak scale @xmath125), the radiative corrections from Higgs-mediated loops may dominate over the tree-level contribution even when the tree-level prediction of the rate @xmath126 exceeds @xmath20. On the other hand, we find that the contribution from quark/lepton-mediated loops can be safely neglected if @xmath127 in the type-II 2HDM and the L2HDM. In the NMSSM and the nMSSM, besides the corrections from the Higgs- and quark/lepton-mediated loops, loops involving sparticles such as squarks, charginos and neutralinos can also contribute to the decay. We numerically checked that the contributions from squarks and charginos can be safely neglected if @xmath127. We also calculated part of the potentially large neutralino corrections (note that there are in total about @xmath128 diagrams for such corrections) and found that they can be neglected too. Since considering all the radiative corrections would make our numerical calculation rather slow, we include only the most important corrections, namely those from Higgs-mediated loops, in presenting our results for the four models. One can intuitively understand the relative smallness of the sparticle contributions to @xmath11 as follows. First consider the squark contributions, which are induced by the @xmath129 interaction (@xmath130 denotes the squark in a chirality state) and the @xmath131 interaction, through box diagrams. Because the @xmath132 interaction conserves the chirality of the squarks while the @xmath133 interaction violates it, to obtain a non-zero contribution to @xmath11 from the squark loops, at least four chirality flips are needed, with three of them provided by the @xmath131 interaction and the rest provided by the left-right squark mixing. This means that if one calculates the amplitude in the chirality basis with the mass-insertion method, the amplitude is suppressed by the mixing factor @xmath134, with @xmath135 being the off-diagonal element of the squark mass matrix. Next consider the chargino/neutralino contributions. Since for a light @xmath0 its doublet component, parameterized by @xmath84 in Eq. (mixing), is usually small, the couplings of @xmath0 with the sparticles
will never be tremendously large @xcite, so the chargino/neutralino contributions are not important either. In our calculation of the decays, we work in the mass eigenstates of the sparticles instead of in the chirality basis. For the type-II 2HDM and the L2HDM we consider the following constraints @xcite: (1) theoretical constraints on @xmath136 from perturbativity, unitarity, and the requirements that the scalar potential be finite at large field values and contain no flat directions @xcite, which imply that @xmath137. (2) The constraints from the LEP search for neutral Higgs bosons. We compute the signals from the Higgs-strahlung production @xmath138 (@xmath139) with @xmath140 @xcite and from the associated production @xmath141 with @xmath142 @xcite, and compare them with the corresponding LEP data, which have been input into our code. We also consider the constraints from @xmath138 by looking for a peak in the @xmath143 recoil-mass distribution of the @xmath1 boson @xcite, and the constraint @xmath144 MeV when @xmath145 @xcite. These constraints limit quantities such as @xmath146 \(\times\,\mathrm{BR}(h_i \to b\bar{b})\) on the @xmath147 plane, with the subscript @xmath148 denoting the coupling coefficient of the @xmath149 interaction. They also impose a model-dependent lower bound on @xmath150, e.g. @xmath151 for the type-II 2HDM (from our scan results), @xmath152 for the L2HDM @xcite, and @xmath153 for the nMSSM @xcite. These bounds are significantly lower than that of the SM, i.e. @xmath154, partially because in new-physics models unconventional decay modes of @xmath155, such as @xmath156, open up. As for the nMSSM, another specific reason for allowing a significantly lighter CP-even Higgs boson is that the boson may be singlet-dominated in this model. With regard to the lightest CP-odd Higgs boson @xmath0, we checked that there is no lower bound on its mass so long as the @xmath157 interaction is weak or @xmath155 is sufficiently heavy. (3) The constraints from the LEP search for a light Higgs boson via the Yukawa process @xmath158, with @xmath22 and @xmath61 denoting a scalar @xcite. These constraints can limit the @xmath159 coupling versus @xmath160 in new-physics models. (4) The constraints from the CLEO-III limit on @xmath161 and the latest BaBar limits on @xmath162. These put very tight constraints on the @xmath163 coupling for @xmath164. In our analysis we use the results of Fig. 8 in the second paper of @xcite to exclude the disfavored points. (5) The constraints from the @xmath165 couplings. Since the Higgs sector can give sizable higher-order corrections to the @xmath165 couplings, we calculate them to one-loop level and require the corrected couplings to lie within the @xmath166 range of their fitted values. The SM predictions for the couplings at the @xmath1 pole are given by @xmath167 and @xmath168 @xcite, and the fitted values are @xmath169 and @xmath170 respectively @xcite. We adapt the formula in @xcite to the 2HDM in our calculation. (6) The constraints from @xmath171 leptonic decay. We require the new-physics correction to the branching ratio @xmath172 to be in the range @xmath173 @xcite, and we use the formula in @xcite in our calculation. About constraints (5) and (6), two points should be noted. One is that all Higgs bosons are involved in these constraints, by entering the self-energy of the @xmath171 lepton, the @xmath174 vertex correction or the @xmath175 vertex correction, and also the box diagrams for @xmath176 @xcite. Since the Yukawa couplings of the Higgs bosons to the @xmath171 lepton are enhanced by @xmath54, and so are the corrections, @xmath54 must be bounded from above for a given spectrum of the Higgs sector.
Generally speaking, the lighter @xmath0 is, the more tightly @xmath54 is limited @xcite. The other point is that in the type-II 2HDM, @xmath177, B-physics observables, as well as the @xmath178 decays discussed above, can constrain the model more tightly than constraints (5) and (6), since the Yukawa couplings of the @xmath171 lepton and the @xmath179 quark are simultaneously enhanced by @xmath54; but for the L2HDM, because only the Yukawa couplings of the @xmath171 lepton are enhanced (see Eq. (yukawa)), constraints (5) and (6) are more important in limiting @xmath54. (7) Indirect constraints from the precision electroweak observables such as @xmath180, @xmath181 and @xmath182, or their combinations @xmath183 @xcite. We require @xmath184 to be compatible with the LEP/SLD data at the @xmath185 confidence level @xcite. We also require the new-physics prediction of @xmath186 to be within the @xmath187 range of its experimental value. The latest results for @xmath188 are @xmath189 (measured value) and @xmath190 (SM prediction) for @xmath191 GeV @xcite. In our code we adapt the formulas for these observables presented in @xcite to the type-II 2HDM and the L2HDM respectively. In calculating @xmath180, @xmath181 and @xmath182, we note that these observables get dominant contributions from the self-energies of the gauge bosons @xmath1, @xmath192 and @xmath193. Since there is no @xmath194 coupling or @xmath195 coupling, @xmath0 must be associated with the other Higgs bosons to contribute to the self-energies. So, by the UV convergence of these quantities, one can infer that for the case of a light @xmath0 and @xmath196, these quantities depend on the spectrum of the Higgs sector as @xmath197 at leading order, which implies that a light @xmath0 can still survive the constraints from the precision electroweak observables, provided the splitting between @xmath150 and @xmath198 is moderate @xcite. (8) The constraints from B-physics observables such as the branching ratios of @xmath199, @xmath200 and @xmath201, and the mass differences @xmath202 and @xmath203. We require the theoretical predictions to agree with the corresponding experimental values at the @xmath187 level. In the type-II 2HDM and the L2HDM, only the charged Higgs boson contributes to these observables through loops, so one can expect that @xmath198 versus @xmath54 will be limited. A combined analysis of the limits in the type-II 2HDM has been done by the CKMfitter group, and the lower bound on @xmath204 as a function of @xmath87 was given in Fig. 11 of @xcite. This analysis indicates that @xmath198 must be heavier than @xmath205 at the @xmath185 C.L., regardless of the value of @xmath54. In this work we use the results of Fig. 11 of @xcite to exclude the disfavored points. As for the L2HDM, B physics actually cannot put any constraint @xcite, because in this model the couplings of the charged Higgs boson to quarks are proportional to @xmath206, and in the case of large @xmath54, which we are interested in, they are suppressed. In our analysis of the L2HDM, we impose the LEP bound on @xmath198, i.e. @xmath207 @xcite. (9) The constraints from the muon anomalous magnetic moment @xmath208. Both the theoretical prediction and the experimentally measured value of @xmath208 have now reached a remarkable precision, but a significant deviation still exists: @xmath209 @xcite. In the 2HDM, @xmath208 gets additional contributions from one-loop diagrams induced by the Higgs bosons and also from the two-loop Barr-Zee diagrams mediated by @xmath0 and @xmath155 @xcite. If the Higgs bosons are much heavier than the @xmath25 lepton mass, the contributions from the Barr-Zee diagrams are more important, and, to efficiently alleviate the discrepancy in
@xmath208, one needs a light @xmath0 along with enhanced couplings to the @xmath25 lepton and also to heavy fermions such as the bottom quark and the @xmath171 lepton, to push up the effects of the Barr-Zee diagrams @xcite. The CP-even Higgs bosons are usually preferred to be heavy, since their contributions to @xmath208 are negative. In the type-II 2HDM, because @xmath54 is tightly constrained by the process @xmath210 at LEP @xcite and by the @xmath178 decay @xcite, the Barr-Zee diagram contribution is insufficient to push @xmath208 into the @xmath187 range around its measured value @xcite, so in our analysis we require the type-II 2HDM to explain @xmath208 at the @xmath211 level. For the L2HDM, @xmath54 is less constrained than in the type-II 2HDM, and the Barr-Zee diagram involving the @xmath171 loop is capable of greatly pushing up the theoretical prediction of @xmath208 @xcite; we therefore require the L2HDM to explain the discrepancy at the @xmath187 level. Unlike the other constraints discussed above, the @xmath208 constraint puts a two-sided bound on @xmath54: on the one hand a large @xmath54 is needed to enhance the Barr-Zee contribution, but on the other hand a too-large @xmath54 will result in an unacceptably large @xmath208. (10) Since this paper concentrates on a light @xmath0, the decay @xmath212 opens up with a possibly large decay width. We require the width of any Higgs boson to be smaller than its mass, to avoid a too-fat Higgs boson @xcite. We checked that for the scenario characterized by @xmath213, the coefficient of the @xmath214 interaction is usually larger than the electroweak scale @xmath125, and consequently a large decay width results. For the NMSSM and nMSSM, the above constraints become more complicated because in these models not only are more Higgs bosons involved, but sparticles also enter the constraints, so it is not easy to understand some of the constraints intuitively. Take the process @xmath199 as an example. In the supersymmetric models, besides the charged-Higgs contribution, chargino loops, gluino loops and neutralino loops also contribute to the process @xcite, and, depending on the SUSY parameters, any of these contributions may dominate over, or be canceled by, the others. As a result, although the charged Higgs affects the process in the same way as in the type-II 2HDM, a charged Higgs as light as @xmath215 is still allowed, even for @xmath216 @xcite. Since, among the constraints, @xmath208 is rather peculiar in that it needs new physics to explain the discrepancy between @xmath217 and @xmath218, we discuss its dependence on the SUSY parameters in more detail. In the NMSSM and the nMSSM, @xmath208 receives contributions from Higgs loops and neutralino/chargino loops. The Higgs contribution is quite similar to that of the type-II 2HDM, except that more Higgs bosons are involved @xcite. For the neutralino/chargino contribution, in the light-bino limit (i.e. @xmath219) it can be approximated by @xcite @xmath220 for @xmath221, with @xmath222 being the smuon mass. Combining the two contributions, one can see that a light @xmath0 along with a large @xmath54, and/or a light smuon with moderate @xmath87, is favored to dilute the discrepancy. Because more parameters are involved in the constraints on the supersymmetric models, we consider the following additional constraints to further limit their parameters: (a) direct bounds on sparticle masses from the LEP1, LEP2 and Tevatron experiments @xcite; (b) the LEP1 bound on the invisible @xmath1 decay, @xmath223, and the LEP2 bounds on neutralino production, @xmath224 and @xmath225 @xcite; (c) dark-matter constraints
from the WMAP relic density, 0.0975 @xmath226 0.1213 @xcite. Note that among the above constraints, constraint (2) on the Higgs sector and constraint (c) on the neutralino sector are very important. This is because, in the supersymmetric models, the SM-like Higgs is bounded from above by about @xmath227 at tree level and by about @xmath228 at loop level, and the relic density restricts the LSP annihilation cross section to a certain narrow range. In our analysis of the NMSSM, we calculate constraints (3) and (5)-(7) ourselves and utilize the code NMSSMTools @xcite to implement the remaining constraints. We also extend NMSSMTools to the nMSSM to implement the constraints. For this extension, the most difficult task is to adapt the code micrOMEGAs @xcite to the nMSSM case. We solve this problem by noting the following facts: as we mentioned before, the nMSSM is actually the same as the NMSSM with the trilinear singlet term set to zero, so we can utilize the model file of the NMSSM as the input of micrOMEGAs and set @xmath229; and since in the nMSSM the LSP is too light to annihilate into Higgs pairs, there is no need to reconstruct the effective Higgs potential to calculate the annihilation channel @xmath230 precisely, with @xmath61 denoting any of the Higgs bosons @xcite. We thank the authors of NMSSMTools for helpful discussions on this issue when we completed this extension @xcite. With the above constraints, we perform four independent random scans over the parameter spaces of the type-II 2HDM, the L2HDM, the NMSSM and the nMSSM respectively. We vary the parameters in the following ranges: @xmath231 for the type-II 2HDM, @xmath232 for the L2HDM, @xmath233 for the NMSSM, and @xmath234 for the nMSSM. In performing the scans, we note that for the NMSSM and the nMSSM some constraints also rely on the gaugino masses and the soft-breaking parameters in the squark and slepton sectors. Since these parameters have little effect on the properties of @xmath0, we fix them to reduce the number of free parameters in our scan. For the squark sector, we adopt the @xmath235 scenario, which assumes that the soft mass parameters for the third-generation squarks are degenerate (@xmath236 = 800 GeV) and that the trilinear couplings of the third-generation squarks are also degenerate, @xmath237 with @xmath238. For the slepton sector, we assume all the soft-breaking masses and trilinear parameters to be 100 GeV. This setting is necessary for the nMSSM, since this model has difficulty explaining the muon anomalous moment at the @xmath239 level for heavy sleptons @xcite. Finally, we assume the grand-unification relation @xmath240 for the gaugino masses, with @xmath241 being the fine-structure constants of the different gauge groups. With a large number of random points in the scans, we finally obtain about @xmath242, @xmath243, @xmath244 and @xmath242 samples for the type-II 2HDM, the L2HDM, the NMSSM and the nMSSM respectively, which survive the constraints and satisfy @xmath245. Analyzing the properties of @xmath0 indicates that, for most of the surviving points in the NMSSM and the nMSSM, its dominant component is the singlet field (numerically speaking, @xmath246), so that its couplings to the SM fermions are suppressed @xcite. Our analysis also indicates that the main decay products of @xmath0 are @xmath247 for the L2HDM @xcite, @xmath248 (dominant) and @xmath247 (subdominant) for the type-II 2HDM, the NMSSM and the nMSSM, and, in some rare cases, neutralino pairs in the nMSSM @xcite.
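Purely as an illustration of the scan procedure described above (uniform random sampling followed by constraint filtering), a minimal sketch follows; the parameter names, ranges, and the passes_constraints stub are placeholders, not the authors' actual code:

import random

def passes_constraints(point):
    # stand-in for the full set of constraints (1)-(10) and (a)-(c) above
    return point["tan_beta"] > 1.0

def random_scan(n_points, ranges):
    surviving = []
    for _ in range(n_points):
        point = {name: random.uniform(lo, hi) for name, (lo, hi) in ranges.items()}
        if passes_constraints(point):
            surviving.append(point)
    return surviving

# illustrative ranges only; the paper's actual ranges (@xmath231-@xmath234) are elided
samples = random_scan(100_000, {"tan_beta": (1.0, 60.0), "m_a_GeV": (0.1, 30.0)})
print(f"{len(samples)} samples survive the constraints")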
In Fig. [fig4] we project the surviving samples on the @xmath249 plane. This figure shows that the allowed range of @xmath54 is from @xmath250 to @xmath251 in the type-II 2HDM, and from @xmath252 to @xmath253 in the L2HDM. Just as we noted before, the lower bounds on @xmath254 come from the requirement that the models explain the muon anomalous moment, while the upper bound is due to the constraint we have imposed from the LEP process @xmath255, which limits the upper reach of the @xmath256 coupling for light @xmath61 @xcite (for the dependence of the @xmath256 coupling on @xmath54, see Sec. II). This figure also indicates that for the NMSSM and the nMSSM, @xmath54 is bounded from above by @xmath257. For the NMSSM this is because a large @xmath87 can suppress the dark-matter mass and make its annihilation difficult (see @xcite and also Sec. II), while for the nMSSM it is because we choose a light slepton mass, so that a large @xmath54 would enhance @xmath208 too significantly to be experimentally acceptable. We checked that, for a slepton mass as heavy as @xmath258, @xmath259 is still allowed for the nMSSM. In Fig. [fig5] and Fig. [fig6] we show the branching ratios of @xmath260 and @xmath261 respectively. Fig. [fig5] indicates that, among the four models, the type-II 2HDM predicts the largest ratio for @xmath260, with its value varying from @xmath262 to @xmath263. The underlying reason is that in the type-II 2HDM the @xmath264 coupling is enhanced by @xmath54 (see Fig. [fig4]), while in the other three models the coupling is suppressed either by @xmath265 or by the singlet component of @xmath0. Fig. [fig6] shows that the L2HDM predicts the largest rate for @xmath266, with its value reaching @xmath5 in the optimal case, and that for the other three models the ratio of @xmath261 is at least about one order of magnitude smaller than that of @xmath267. This feature can be easily understood from the @xmath268 coupling introduced in Sec. II. We emphasize that, if nature prefers a light @xmath0, @xmath260 and/or @xmath269 in the type-II 2HDM and the L2HDM will be observable at GigaZ; then, from the rates of the two decays, one can determine whether the type-II 2HDM or the L2HDM is the right theory. On the other hand, if both decays are observed with small rates, or fail to be observed, the singlet extensions of the MSSM are favored. In Fig. [fig7] we show the rate of @xmath3 as a function of @xmath270. This figure indicates that the branching ratio of @xmath121 can reach @xmath271, @xmath272, @xmath273 and @xmath274 in the optimal cases of the type-II 2HDM, the L2HDM, the NMSSM and the nMSSM respectively, which implies that the decay @xmath121 will never be observable at GigaZ if one of the studied models is chosen by nature. The reason for this smallness is, as we pointed out before, that the decay @xmath121 proceeds only at loop level. Comparing the optimal cases of the type-II 2HDM, the NMSSM and the nMSSM shown in Figs. 5-7, one finds that the relation @xmath275 holds for all of the decays. This is because the decays are all induced by Yukawa couplings of similar structure in these models: in the supersymmetric models, the large singlet component of the light @xmath0 suppresses the Yukawa couplings, and the @xmath0 in the nMSSM has a larger singlet component than that in the NMSSM. Next we consider the decay @xmath11, which, unlike the above decays, depends on the Higgs self-interactions. In Fig. [fig8] we plot its rate as a function of @xmath270; this figure indicates that @xmath276 may be the largest among the ratios of the exotic @xmath1 decays, reaching @xmath277 in the optimal cases of the type-II 2HDM, the L2HDM and the NMSSM. The underlying reason is that in some cases the intermediate state @xmath119 in Fig. [fig3](a) may be on shell. In fact, we find this is one of the main differences between the
NMSSM and the nMSSM: in the NMSSM, @xmath119 in Fig. [fig3](a) may be on shell (corresponding to the points with large @xmath278), while in the nMSSM this seems impossible. So we conclude that the decay @xmath11 may serve as an alternative channel for testing new-physics models; in particular, it may be used to distinguish the NMSSM from the nMSSM if supersymmetry is found at the LHC and @xmath11 is observed at GigaZ with a large rate. Before ending our discussion, we note that in the NMSSM the Higgs boson @xmath0 may be lighter than @xmath279 without conflicting with the low-energy data from @xmath178 decays and the other observables (see Figs. [fig4]-[fig8]). In this case @xmath0 is axion-like, as pointed out in @xcite. We checked that, among the rare @xmath1 decays discussed in this paper, the largest branching ratio comes from @xmath280, which can reach @xmath281. Since in this case the decay products of @xmath0 are highly collinear muon pairs, detecting the decay @xmath280 may require some knowledge about the detectors, which is beyond the scope of this paper. In summary, we studied the rare @xmath1 decays @xmath2 (@xmath7), @xmath282 and @xmath4 in the type-II 2HDM, the lepton-specific 2HDM, the NMSSM and the nMSSM, all of which predict a light CP-odd Higgs boson @xmath0. In the parameter space allowed by current experiments, the branching ratio can be as large as @xmath5 for @xmath118, @xmath8 for @xmath3 and @xmath9 for @xmath4, which implies that the decays @xmath2 and @xmath283 may be accessible at the GigaZ option. Since different models predict different sizes of the branching ratios, these decays can be used to distinguish the models through the measurement of these rare decays. This work was supported in part by HASTIT under grant No. 2009HASTIT004, by the National Natural Science Foundation of China (NNSFC) under grant Nos. 10821504, 10725526, 10635030, 10775039 and 11075045, and by the Project of Knowledge Innovation Program (PKIP) of the Chinese Academy of Sciences under grant No. [...]. References (entries cleaned to a uniform style where recoverable; journal names missing from the dump are left as bare volume/page/year): for some reviews see, e.g., M. A. Perez, G. Tavares-Velasco and J. J. Toscano, Int. J. Mod. Phys. A 19, 159 (2004); J. M. Yang, arXiv:1006.2594. J. I. Illana and M. Masip, 67, 035004 (2003); J. Cao, Z. Xiong and J. M. Yang, 32, 245 (2004). D. Atwood et al., 66, 093005 (2002). J. Kalinowski and S. Pokorski, 219, 116 (1989); A. Djouadi, P. M. Zerwas and J. Zunft, 259, 175 (1991); A. Djouadi, J. Kalinowski and P. M. Zerwas, Z. Phys. C 54, 255 (1992); M. Krawczyk et al., 19, 463 (2001); 8, 495 (1999). J. F. Gunion, G. Gamberini and S. F. Novaes, 38, 3481 (1988); T. J. Weiler and T.-C. Yuan, 318, 337 (1989); A. Djouadi et al., 1, 163 (1998) [hep-ph/9701342]. D. Chang and W.-Y. Keung, 77, 3732 (1996); E. Keith and E. Ma, 57, 2017 (1998); M. A. Perez, G. Tavares-Velasco and J. J. Toscano, Int. J. Mod. Phys. A 19, 159 (2004); F. Larios, G. Tavares-Velasco and C.-P. Yuan, 64, 055004 (2001); 66, 075006 (2002). A. Djouadi et al., 10, 27 (1999) [hep-ph/9903229]. For a detailed introduction to the NMSSM, see F. Franke and H. Fraas, Int. J. Mod. Phys. A 12, 479 (1997); for a recent review of the NMSSM, see, for example, U. Ellwanger, C. Hugonie and A. M. Teixeira, arXiv:0910.1785. See, e.g., J. R. Ellis, J. F. Gunion, H. E. Haber, L. Roszkowski and F. Zwirner, Phys. Rev. D 39, 844 (1989); M. Drees, Int. J. Mod. Phys. A 4, 3635 (1989); U. Ellwanger, M. Rausch de Traubenberg and C. A. Savoy, Phys. Lett. B 315, 331 (1993); Nucl. Phys. B 492, 21 (1997); D. J. Miller, R. Nevzorov and P. M. Zerwas, 681, 3 (2004). C. Panagiotakopoulos and K. Tamvakis, 446, 224 (1999); 469, 145 (1999); C. Panagiotakopoulos and A. Pilaftsis, 63, 055003 (2001); A. Dedes et al., 63, 055009 (2001); A. Menon et al., 70, 035005 (2004); V. Barger et al., 630, 85 (2005). C. Balazs et al., 0706, 066 (2007). B. A. Dobrescu and K. T. Matchev, 0009, 031 (2000); A. Arhrib, K. Cheung, T. J. Hou and K. W. Song, hep-ph/0611211; 0703, 073 (2007);
X. G. He, J. Tandean and G. Valencia, 98, 081802 (2007); 0806, 002 (2008); F. Domingo et al., 0901, 061 (2009); G. Hiller, 70, 034018 (2004); R. Dermisek and J. F. Gunion, 75, 075019 (2007); 79, 055014 (2009); 81, 055001 (2010); R. Dermisek, J. F. Gunion and B. McElrath, 76, 051105 (2007); Z. Heng et al., 77, 095012 (2008); A. Belyaev et al., 81, 075021 (2010); D. Das and U. Ellwanger, arXiv:1007.1151 [hep-ph]. S. Andreas, O. Lebedev, S. Ramos-Sanchez and A. Ringwald, arXiv:1005.3978 [hep-ph]. J. F. Gunion, JHEP 0908, 032 (2009); R. Dermisek and J. F. Gunion, Phys. Rev. D 81, 075003 (2010). R. Dermisek and J. F. Gunion, 95, 041801 (2005); Phys. Rev. D 73, 111701 (2006). J. Cao, H. E. Logan and J. M. Yang, 79, 091701 (2009). J. Cao, P. Wan, L. Wu and J. M. Yang, 80, 071701 (2009). J. F. Gunion and H. E. Haber, 67, 075019 (2003). R. M. Barnett et al., Phys. Lett. B 136, 191 (1984); R. M. Barnett, G. Senjanovic and D. Wyler, Phys. Rev. D 30, 1529 (1984); Y. Grossman, Nucl. Phys. B 426, 355 (1994). H. S. Goh, L. J. Hall and P. Kumar, JHEP 0905, 097 (2009). A. G. Akeroyd and W. J. Stirling, Nucl. Phys. B 447, 3 (1995); A. G. Akeroyd, Phys. Lett. B 377, 95 (1996); H. E. Logan and D. MacLennan, Phys. Rev. D 79, 115022 (2009); M. Aoki et al., arXiv:0902.4665 [hep-ph]. V. Barger, P. Langacker, H.-S. Lee and G. Shaughnessy, Phys. Rev. D 73, 115010 (2006). S. Hesselbach et al., arXiv:0810.0511v2 [hep-ph]. de Vivie and P. Janot [ALEPH Collaboration], PA13-027, contribution to the International Conference on High Energy Physics, Warsaw, Poland, 25-31 July 1996; J. Kurowska, O. Grajek and P. Zalewski [DELPHI Collaboration], CERN-OPEN-99-385. [ALEPH, DELPHI and L3 Collaborations], Phys. Rept. 427, 257 (2006). J. Cao and J. M. Yang, JHEP 0812, 006 (2008). M. Krawczyk and D. Temes, Eur. Phys. J. C 44, 435 (2005). G. Altarelli and R. Barbieri, 253, 161 (1991); M. E. Peskin and T. Takeuchi, 46, 381 (1992). C. Amsler et al. [Particle Data Group], 667, 1 (2008). O. Deschamps, S. Descotes-Genon, S. Monteil, V. Niess, S. T'Jampens and V. Tisserand, arXiv:0907.5135 [hep-ph]. S. Su and B. Thomas, Phys. Rev. D 79, 095014 (2009). G. Abbiendi et al., Eur. Phys. J. C 32, 453 (2004). M. Davier et al., 66, 1 (2010). K. Cheung et al., Phys. Rev. D 64, 111301 (2001); K. Cheung and O. C. W. Kong, Phys. Rev. D 68, 053003 (2003). T. Besmer, C. Greub and T. Hurth, 609, 359 (2001); F. Borzumati et al., 62, 075005 (2000). J. Cao, K.-I. Hikasa, W. Wang, J. M. Yang and L.-X. Yu, Phys. Rev. D 82, 051701 (2010) [arXiv:1006.4811 [hep-ph]]. J. F. Gunion et al., Phys. Rev. D 73, 015011 (2006). S. P. Martin and J. D. Wells, Phys. Rev. D 64, 035003 (2001). J. Abdallah et al., Eur. Phys. J. C 31, 421 (2004); G. Abbiendi et al., Eur. Phys. J. C 35, 1 (2004). J. Dunkley et al. [WMAP Collaboration], Astrophys. J. Suppl. 180, 306 (2009) [arXiv:0803.0586 [astro-ph]]. U. Ellwanger et al., 02, 066 (2005). G. Belanger, F. Boudjema, A. Pukhov and A. Semenov, Comput. Phys. Commun. 174, 577 (2006); Comput. Phys. Commun. 176, 367 (2007). G. Belanger, F. Boudjema, C. Hugonie, A. Pukhov and A. Semenov, JCAP 0509, 001 (2005).

It is well known that the classical magnetoresistance (MR) in metals or semiconductors with a closed free-electron Fermi surface increases quadratically with increasing magnetic field @xmath2 for @xmath3 and saturates when @xmath4; here @xmath5 is the zero-magnetic-field mobility. Hence the extraordinarily high and linear MR (LMR), which breaks this familiar rule, has been attracting much attention ever since its discovery in the past decade. This unexpected LMR has been reported in silver chalcogenides @xcite, indium antimonide @xcite, silicon @xcite, MnAs-GaAs composite material @xcite, and graphene @xcite. Kapitza's linear law @xcite states that a metal shows a magnetoresistance linear in a perpendicular magnetic field when it has an open Fermi surface and a mean free path longer than the electronic Larmor radius. Recently, two other models, which do not require an open Fermi surface, have been constructed to provide possible mechanisms for the LMR phenomenon.
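The classical behavior that LMR violates can be summarized in a standard, model-independent form; this is textbook material given for orientation rather than something recovered from the elided formulas:

\[
\frac{\Delta\rho_{xx}(B)}{\rho_{xx}(0)} \;\propto\; (\mu B)^2 \quad (\mu B \ll 1),
\qquad
\rho_{xx}(B) \;\to\; \text{const} \quad (\mu B \gg 1),
\]

i.e. quadratic growth at low field and saturation once the cyclotron motion completes many orbits between collisions.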
Abrikosov suggested a quantum-limit origin of LMR for a homogeneous system with a gapless linear energy spectrum @xcite; his model requires that the Landau levels be well formed and the carrier concentration be small enough that all electrons occupy only the lowest Landau band. Alternatively, Parish and Littlewood developed a classical model that does not involve a linear spectrum @xcite: ignoring the concrete microscopic mechanism, they attributed this unusual MR to mobility fluctuations in a strongly inhomogeneous system. Topological insulators (TIs) @xcite are novel materials with a full energy gap in the bulk and gapless surface states. Due to their unique band structure, with only one helical Dirac cone and linear energy dispersion @xcite, the surface states of the TI Bi@xmath0Se@xmath1 become an excellent platform for the study of quantum-limit LMR. A recent experiment on this flat-surface system, however, reported that a large positive MR, which becomes very linear above a characteristic field of @xmath6 @xmath7 @xmath8 T, was observed even in the opposite situation, where the carrier sheet density is high enough that the electrons occupy more than one Landau level @xcite. Moreover, it was found that raising the temperature to room temperature has almost no influence on the observed LMR. It is striking that this observation conflicts with Abrikosov's model and also with the classical Parish-Littlewood model. So far, a reliable theoretical scheme capable of explaining this novel experiment has been lacking. In this paper, we generalize the balance-equation approach @xcite to a system modeling the surface states of a three-dimensional TI, to investigate the two-dimensional magnetotransport in it. We find that a positive, non-saturating and dominantly linear magnetoresistance can appear over quite a wide magnetic-field range in a TI surface state having a positive and finite effective g-factor. This linear magnetoresistance shows up in systems of high carrier concentration and low mobility, when the electrons are in extended states spread over many smeared Landau levels, and it persists up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator Bi@xmath0Se@xmath1 nanoribbons @xcite. We consider the surface state of a Bi@xmath0Se@xmath1-type large-bulk-gap TI in the @xmath9-@xmath10 plane under the influence of a uniform magnetic field @xmath11 applied along the @xmath12 direction @xcite. Following the experimental observation @xcite, we assume that the Fermi energy lies in the gap of the bulk band and above the Dirac point, i.e. the surface carriers are electrons, and, further, that the separations of the Fermi energy from the bottom of the bulk band and from the Dirac point are much larger than the highest temperature (@xmath13) considered in this work; hence the contribution of the bulk band to the magnetotransport is negligible. These electrons, scattered by randomly distributed impurities and by phonons, are driven by a uniform in-plane electric field @xmath14 in the topological surface.
The Hamiltonian of this many-electron and phonon system consists of an electron part @xmath15, a phonon part @xmath16, and electron-impurity and electron-phonon interactions @xmath17 and @xmath18: @xmath19. Here the electron Hamiltonian is taken in the form @xmath20, in which @xmath21, @xmath22, @xmath23 and @xmath24 stand, respectively, for the canonical momentum, coordinate, momentum and spin operators of the @xmath25-th electron of charge @xmath26, @xmath27 is the vector potential of the perpendicular magnetic field @xmath28 in the Landau gauge, @xmath29 is the Fermi velocity, @xmath30 is the effective g-factor of the surface electrons, and @xmath31 is the Bohr magneton, with @xmath32 the free-electron mass. The sum index @xmath25 in Eq. (helectron) runs over all electrons, of total number @xmath33, in the surface state of unit area. In the framework of the balance-equation approach @xcite, the two-dimensional center-of-mass (c.m.) momentum and coordinate, @xmath34 and @xmath35, and the relative-electron momenta and coordinates, @xmath36 and @xmath37, are introduced to write the Hamiltonian @xmath15 as the sum of a single-particle c.m. part @xmath38 and a many-particle relative-electron part @xmath39: @xmath40, with @xmath41. In this, @xmath42 is the canonical momentum of the center of mass and @xmath43 is the canonical momentum of the @xmath25-th relative electron. Here we have also introduced the c.m. spin operators @xmath44 and @xmath45. The commutation relations between the c.m. spin operators @xmath46 and @xmath47 and the spin operators @xmath48, @xmath49 and @xmath50 of the @xmath25-th electron are of order @xmath51: @xmath52 \(= N^{-1/2}\,{\rm i}\,\varepsilon_{\beta_1\beta_2\beta_3}\,\sigma_j^{\beta_3}\), with @xmath53. Therefore, for a macroscopically large-@xmath33 system, the c.m. part @xmath38 actually commutes with the relative-electron part @xmath54 in the Hamiltonian, i.e. the c.m. motion and the relative motion of the electrons are truly separated from each other. The couplings between the two emerge only through the electron-impurity and electron-phonon interactions. Furthermore, the electric field @xmath55 shows up only in @xmath38, and, in view of \([r_{i\alpha}, p_{j\beta}] = {\rm i}\,\delta_{\alpha\beta}\,\delta_{ij}(1 - 1/N) \simeq {\rm i}\,\delta_{\alpha\beta}\,\delta_{ij}\) (i.e. the relative-electron momenta and coordinates can be treated as canonical conjugate variables), the relative-motion part @xmath54 is just the Hamiltonian of @xmath33 electrons in the TI surface state in the magnetic field without the presence of the electric field. In terms of the c.m. coordinate @xmath57 and the relative-electron density operator @xmath58, the electron-impurity and electron-phonon interactions can be written as @xcite @xmath59. Here @xmath60 and @xmath61 are, respectively, the impurity potential (for an impurity at the randomly distributed position @xmath62) and the electron-phonon coupling matrix element in the plane-wave representation, and @xmath63, with @xmath64 and @xmath65 being the creation and annihilation operators for a phonon of wavevector @xmath66 in branch @xmath67 with frequency @xmath68. The velocity operator @xmath69 is the time variation of the coordinate: @xmath70 \(= v_{\rm F}\,(\sigma_{{\rm c}y}\,\hat{i} - \sigma_{{\rm c}x}\,\hat{j})\). To derive a force-balance equation for steady-state transport, we consider the Heisenberg equation for the rate of change of the c.m. canonical momentum @xmath71: @xmath72 \(= N e\,\bm{v} \times \bm{B} + N e\,\bm{E} + \bm{F}_{\rm i} + \bm{F}_{\rm p}\), in which the frictional forces @xmath73 and @xmath74 share the same expressions as given in Ref. @xcite. The statistical average of the operator equation can be determined to linear order in the electron-impurity and electron-phonon interactions @xmath17 and @xmath18, with the initial density matrix @xmath75 at temperature @xmath76, when the in-plane electric field @xmath77 is not strong. For steady transport states we have @xmath78, leading to a force-balance equation of the form @xmath79.
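Collecting the surviving fragments of the operator equation above (the combination \(N e\,\bm v \times \bm B + N e\,\bm E + \bm F_{\rm i} + \bm F_{\rm p}\) is legible in the dump), the steady-state balance takes the schematic form below; the signs follow the usual balance-equation convention and should be read as a hedged reconstruction:

\[
0 \;=\; N e\,\bm{E} \;+\; N e\,\bm{v} \times \bm{B} \;+\; \bm{F}_{\rm i} \;+\; \bm{F}_{\rm p},
\qquad
\rho_{yx} \;=\; \frac{B}{N e},
\]

where the second relation is the standard Hall resistivity of a single-carrier system, consistent with the transverse resistivity @xmath94 quoted below.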
Here @xmath80, the statistically averaged velocity of the moving center of mass, is identified as the average rate of change of its position, i.e. the drift velocity of the electron system driven by the electric field @xmath77, and @xmath81 and @xmath82 are the frictional forces experienced by the center of mass due to impurity and phonon scatterings, @xmath83, in which @xmath84 is the Bose distribution function, @xmath85, and @xmath86 stands for the imaginary part of the Fourier spectrum of the relative-electron density correlation function, defined by @xmath87, where @xmath88 and @xmath89 denotes statistical averaging over the initial density matrix @xmath90 @xcite. The force-balance equation describes the steady-state two-dimensional magnetotransport in the surface state of a TI. Note that the frictional forces @xmath81 and @xmath82 are in the direction opposite to the drift velocity @xmath91, and their magnitudes are functions of @xmath92 only. With the drift velocity @xmath93 in the @xmath9 direction, the force-balance equation yields a transverse resistivity @xmath94 and a longitudinal resistivity @xmath95; the linear one is of the form @xmath96. For calculating the electron density correlation function @xmath97, we proceed in the Landau representation @xcite. The Landau levels of the single-particle Hamiltonian @xmath98 of the relative-electron system in the absence of the electric field consist of a positive (@xmath99) and a negative (@xmath100) branch @xcite, @xmath101 with @xmath102 and @xmath103, and a zero (@xmath104) level, @xmath105. The corresponding Landau wave functions are @xmath106 and @xmath107 for @xmath108, and @xmath109 for @xmath104. Here @xmath110 is the wavevector of the system along the @xmath9 direction, @xmath111 with @xmath112, and @xmath113 is the harmonic-oscillator eigenfunction, with @xmath114 being the Hermite polynomial, @xmath115, and @xmath116. Each Landau level contains @xmath117 electron states for a system of unit surface area. The positive branch @xmath118 and the @xmath104 level @xmath119 of the above energy spectrum are indeed quite close to those of the surface states in the bulk gap of Bi@xmath0Se@xmath1-family materials derived from microscopic band calculations @xcite. The Landau levels are broadened due to impurity, phonon and electron-electron scatterings. We model the imaginary part of the retarded Green's function, or the density of states, of the broadened Landau level @xmath120 (written for the @xmath99 branch and the @xmath104 level) using a Gaussian-type form @xcite, @xmath121, with a half width @xmath122 of the form @xcite \(@xmath123^{1/2}\). Here @xmath124 is the single-particle lifetime and @xmath125 is the cyclotron frequency of the linear-energy-dispersion system, with @xmath126 being the zero-temperature Fermi level. Using a semi-empirical parameter @xmath127 to relate @xmath124 to the transport scattering time @xmath128, and expressing @xmath129 in terms of the zero-field mobility @xmath5 at finite temperature @xcite, we can write the Landau-level broadening as \(@xmath130^{1/2}\). In the present study we consider the case of @xmath120 doping, i.e. the Fermi level is high enough above the energy zero of the Dirac cone to lie among the positive-branch levels, while the states of the @xmath100-branch levels are completely filled, so that they are irrelevant to electron transport. Special attention has to be paid to the @xmath104 level, since, depending on the direction of the exchange potential, the effective g-factor of a TI surface state, @xmath30, can be positive, zero or negative @xcite. The sign and magnitude of the effective g-factor determine how many states of the zero level should be included in, or excluded from, the available states for electron occupation in the case of @xmath120 doping at a magnetic field: (i) if @xmath131, the @xmath104 level center is exactly at @xmath132 and the system is electron-hole symmetric.
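The elided spectrum (@xmath101, @xmath105) is, for a Dirac surface state with a Zeeman term, conventionally written in the following standard form, given here for orientation; it makes the three g-factor cases concrete:

\[
E_n \;=\; \operatorname{sgn}(n)\,\sqrt{\,2 e \hbar v_{\rm F}^2\,|n|\,B \;+\; \Bigl(\tfrac{1}{2}\,g\,\mu_B B\Bigr)^2\,}\quad (n = \pm 1, \pm 2, \dots),
\qquad
E_0 \;=\; -\tfrac{1}{2}\,g\,\mu_B B ,
\]

so the zero level sits exactly at zero energy for \(g = 0\) and is pushed to negative (positive) energy for positive (negative) \(g\).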
In this case, the total number of negative-energy states (including the states of the lower half of the @xmath104 level and the states of the @xmath100-branch levels) and the total number of positive-energy states (including the states of the upper half of the @xmath104 level and the states of the @xmath99-branch levels) do not change when the magnetic field changes. Therefore, the lower-half negative-energy states of this level are always filled, and the upper-half positive-energy states are available for occupation by the particles counted as electrons participating in transport in the case of @xmath120 doping. (ii) For a finite positive @xmath133, the @xmath104 level (@xmath134) moves downward to negative energy, and at finite magnetic field strength @xmath2 its distance to the nearest @xmath100-branch level, @xmath135, is closer than that to the nearest @xmath99-branch level. This is equivalent to the opening of an energy gap, increasingly enlarged with increasing @xmath2, between the @xmath99-branch states and the states of the zero level and the @xmath100-branch levels. The opening of a sufficient energy gap implies that, with increasing magnetic field, the states in the @xmath99-branch levels no longer shrink into the zero level, and thus the @xmath104 level should be completely excluded from the conduction band, i.e. only particles occupying the @xmath99-branch states are counted as electrons participating in transport in the case of @xmath120 doping, once the magnetic field @xmath2 exceeds a certain value (depending on the magnitude of @xmath30). (iii) For a finite negative @xmath136, the @xmath104 level (@xmath134) moves upward to positive energy, and an increasingly enlarged energy gap opens between the states of the zero level plus the @xmath99 branch and the states of the @xmath100-branch levels; particles occupying the @xmath104 level and the @xmath99-branch states are then the electrons participating in transport once the magnetic field @xmath2 exceeds a certain value. As a result, the experimentally accessible sheet density @xmath33 of electrons participating in transport is related to the Fermi energy @xmath137 by the following equation, valid at finite @xmath30 for magnetic field @xmath2 larger than a certain value: @xmath138, in which \(@xmath139^{-1}\) is the Fermi distribution function at temperature @xmath76 and the summation index @xmath120 runs over @xmath140 for @xmath133, or @xmath141 for @xmath136. In the case of @xmath131, @xmath142 is valid for an arbitrary magnetic field, with @xmath143. The imaginary part of the relative-electron density correlation function in the presence of a magnetic field, @xmath86, can be expressed in the Landau representation as @xcite @xmath144, in which the transform factor is \(@xmath145^{2}\), with @xmath146, @xmath147, @xmath148 and @xmath149 being associated Laguerre polynomials. The Landau-representation correlation function @xmath150 in Eq. (piqw) can be constructed from the imaginary part of the retarded Green's function @xmath151, or the density of states, of the @xmath120-th Landau level as @xcite @xmath152, involving the product \({\rm Im}\,G_n(\epsilon+\omega)\,{\rm Im}\,G_{n'}(\epsilon)\). The summation indices @xmath120 and @xmath153 in Eq. (piqw) are taken over @xmath140 for @xmath133, or @xmath154 for @xmath136. In the case of @xmath131, Eq. (piqw) still works, and the summation indices @xmath120 and @xmath153 go over @xmath154, but with @xmath155 replaced by @xmath156 in Eq. (p2nn). Numerical calculations are performed for the magnetoresistivity @xmath157 of the surface state in a uniform TI Bi@xmath0Se@xmath1. At zero temperature, the elastic scattering contributing to the resistivity is modeled by a Coulomb potential due to charged impurities @xcite, @xmath158, with @xmath159 being the impurity density.
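For orientation, the filling factor used in the figure discussion below follows from counting the @xmath117 states per Landau level; a small self-contained sketch (the numerical sheet density is an illustrative placeholder, since the paper's value @xmath169 is elided):

H_PLANCK = 6.62607015e-34  # Planck constant (J s)
E_CHARGE = 1.602176634e-19  # elementary charge (C)

def filling_factor(n_s, B):
    """nu = n_s * h / (e * B) for sheet density n_s (m^-2) and field B (T)."""
    return n_s * H_PLANCK / (E_CHARGE * B)

for B in (2.0, 5.0, 10.0):
    # n_s = 5e12 cm^-2 = 5e16 m^-2, a typical TI-surface magnitude
    print(f"B = {B:4.1f} T  ->  nu = {filling_factor(5e16, B):6.1f}")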
The imaginary part of the relative-electron density correlation function in the presence of a magnetic field, @xmath86, can be expressed in the Landau representation as [xcite] @xmath144, in which the transform factor is @xmath145, with @xmath146, @xmath147, @xmath148, and @xmath149 being associated Laguerre polynomials. The Landau-representation correlation function @xmath150 in Eq. (piqw) can be constructed from the imaginary part of the retarded Green's function @xmath151, or the density of states, of the @xmath120th Landau level as [xcite] @xmath152 x Im G_n(eps + omega) Im G_{n'}(eps). The summation indices @xmath120 and @xmath153 in Eq. (piqw) are taken over @xmath140 for @xmath133, or over @xmath154 for @xmath136. In the case of @xmath131, Eq. (piqw) still works, and the summation indices @xmath120 and @xmath153 run over @xmath154, but with @xmath155 replaced by @xmath156 in Eq. (p2nn).

Numerical calculations are performed for the magnetoresistivity @xmath157 of the surface state in a uniform TI Bi@xmath0Se@xmath1. At zero temperature the elastic scattering contributing to the resistivity is modeled by a Coulomb potential due to charged impurities [xcite], @xmath158, with @xmath159 being the impurity density, which is determined by the zero-magnetic-field mobility @xmath5. At temperatures higher than @xmath160 [xcite], phonon scatterings play an increasingly important role, and the dominant inelastic contribution comes from optical phonons. For this polar material, scattering by optical phonons via the deformation potential can be neglected; hence we take account of inelastic scattering from optical phonons via the Fröhlich coupling @xmath161. In the numerical calculation we use the following parameters [xcite]: Fermi velocity @xmath162, static dielectric constant @xmath163, optical dielectric constant @xmath164, and phonon energy @xmath165. The broadening parameter is taken to be @xmath166.

[Fig. (diffg): magnetoresistivity as a function of the magnetic field @xmath2 for different effective g-factors, @xmath167 and @xmath168, for a TI surface system with electron sheet density @xmath169, in the cases of zero-magnetic-field mobility @xmath170 (a) and @xmath171 (b); several integer positions of the filling factor @xmath172 are marked in (b).]

Fig. (diffg) shows the calculated magnetoresistivity @xmath157 versus the magnetic field strength @xmath2 for a TI surface system with electron sheet density @xmath169 but different effective g-factors, @xmath167 and @xmath168, for two values of the zero-magnetic-field mobility, @xmath170 and @xmath171, representing different degrees of Landau-level broadening. In the case without Zeeman splitting (@xmath131), the resistivity @xmath157 exhibits almost no change with magnetic field up to 10 T, except for the Shubnikov-de Haas (SdH) oscillation that shows up in the case of @xmath171. This kind of magnetoresistance behavior was indeed seen experimentally in the electron-hole-symmetric massless system of single-layer graphene [xcite]. In the case of a positive g-factor, @xmath173, the magnetoresistivity increases linearly with increasing magnetic field, while for a negative g-factor, @xmath174, it decreases linearly with increasing magnetic field.

[Fig. (rhob): resistivity as a function of the magnetic field @xmath2 for different values of the zero-magnetic-field mobility: (a) @xmath175, (b) @xmath176, (c) @xmath177, (d) @xmath178, (e) @xmath179, and (f) @xmath180. The inset of (a) illustrates the same for a larger magnetic-field range, @xmath181. The filling factor @xmath182 is plotted versus the magnetic field in (f), and several integer positions of @xmath182 are also marked in (d) and (e). Here the surface electron density is @xmath169 and the lattice temperature is @xmath183.]

In the following we examine the linearly increasing magnetoresistance of the positive-@xmath30 case in more detail. Fig. (rhob) shows the calculated resistivity @xmath157 versus the magnetic field strength @xmath2 at lattice temperature @xmath183, for a system of carrier sheet density @xmath169 with @xmath173 and different zero-field mobilities @xmath184 and @xmath180. All resistivity curves for mobilities @xmath185 exhibit clear linearity over the magnetic-field range and show no tendency toward saturation at the highest field in the figure. Especially for the case @xmath170, the linear behavior extends even up to a magnetic field of @xmath186, as illustrated in the inset of Fig. (rhob)(a). This feature contradicts the classical MR, which saturates at sufficiently large magnetic field, @xmath187. Note that here we only present the calculated @xmath157 for magnetic fields @xmath2 larger than @xmath188 T, for which a sufficient energy gap @xmath135 is assumed to have opened, so that with further increase of the magnetic field the states in the branch levels no longer shrink into the zero level, which is thus excluded from the conduction band.
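Before turning to the weak-field regime, it helps to quantify when neighboring Landau levels overlap, which is the distinction behind the two mobility values above. The toy comparison below is our own construction: the Gamma expression, built from omega_c = e B v_F^2 / eps_F and tau = mu eps_F / (e v_F^2), merely stands in for the paper's half-width formula, and all parameter values are illustrative.

import numpy as np

HBAR = 1.054571817e-34  # J s
E_CHARGE = 1.602176634e-19  # C

def level_spacing_ev(n, b_field, v_f=5.0e5):
    # Spacing eps_{n+1} - eps_n (eV) of Dirac Landau levels.
    scale = np.sqrt(2.0 * HBAR * v_f**2 * E_CHARGE * b_field) / E_CHARGE
    return scale * (np.sqrt(n + 1) - np.sqrt(n))

def broadening_ev(b_field, mobility, v_f=5.0e5, eps_f_ev=0.1):
    # Toy half-width Gamma = hbar * sqrt(2 omega_c / (pi tau)), using the
    # cyclotron frequency and lifetime of a linear-dispersion system.
    eps_f = eps_f_ev * E_CHARGE
    omega_c = E_CHARGE * b_field * v_f**2 / eps_f
    tau = mobility * eps_f / (E_CHARGE * v_f**2)
    return HBAR * np.sqrt(2.0 * omega_c / (np.pi * tau)) / E_CHARGE

for mobility in (0.1, 1.0, 10.0):  # m^2 / V s
    gamma = broadening_ev(5.0, mobility)
    spacing = level_spacing_ev(5, 5.0)
    print(mobility, round(gamma / spacing, 2))  # > 1 means overlapped levels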
The gap assumption is of course not valid for a very weak magnetic field: when @xmath189, the energy gap @xmath190, and the situation becomes similar to the case of @xmath131, in which the whole upper half of the zero-level states is available for electron occupation and the resistivity @xmath157 should stay flat as the magnetic field changes. With increasing @xmath2, the portion of the zero-level states available to conduction electrons decreases until the magnetic field reaches @xmath191. As a result, the resistivity @xmath157 should exhibit a crossover from a flat dependence at small @xmath2 to a positively linear increase at @xmath192. This is just the behavior observed in the TI Bi@xmath0Se@xmath1 [xcite].

Note that in the case of @xmath170 the broadened Landau-level widths are always larger than the neighboring level interval, @xmath193, which requires @xmath194 even for the lowest Landau level, @xmath195; i.e. the whole Landau-level spectrum is smeared. With increasing zero-field mobility, the magnitude of the resistivity @xmath157 decreases, and once the broadened Landau-level width becomes smaller than the neighboring level interval, @xmath196, a weak SdH oscillation begins to occur around the linearly dependent average value of @xmath157 in the higher portion of the magnetic-field range, as seen in Figs. (rhob)(c), (d), and (e) for @xmath197 and @xmath198. On the other hand, in the case of large mobility, e.g. @xmath199, where the broadened Landau-level widths @xmath200 are much smaller than the neighboring level interval even for level indices @xmath120 as large as @xmath201, the magnetoresistivity shows pronounced SdH oscillations, and the linear behavior disappears before the quantum Hall effect appears [xcite], as shown in Fig. (rhob)(f).

Abrikosov's model for the LMR requires the applied magnetic field to be large enough to reach the quantum limit, at which all carriers sit in the lowest Landau level [xcite], whereas more than one Landau level is evidently occupied in the experimental samples over the field range in which the linear, non-saturating magnetoresistivity was observed [xcite]. For the given electron surface density @xmath202, the number of occupied Landau levels, i.e. the filling factor @xmath172, at different magnetic fields is shown in Fig. (rhob)(f), as well as in Figs. (rhob)(d) and (e), where the integer positions of @xmath203, i.e. complete filling of @xmath182 Landau levels, coincide with the minima of the density of states, that is, with the dips of the SdH oscillation. This is in contrast with the @xmath131 case, where an integer value of @xmath203, which implies filling up to the center of the @xmath182th Landau level, is located at a peak of the SdH oscillation, as shown in Fig. (diffg)(b). The SdH oscillations observed in the Bi@xmath0Se@xmath1 nanoribbon exhibiting non-saturating surface LMR in the experiment [xcite] favor the former case: a finite positive effective @xmath133.

[Fig. (rhon): resistivity plotted as a function of the surface electron density @xmath33 at magnetic field @xmath204, (a) for different values of the zero-field mobility @xmath5 and (b) for different values of the zero-field conductivity @xmath205.]

[Fig. (rhot): magnetoresistivity at various lattice temperatures; here the zero-magnetic-field mobility at zero temperature is @xmath206.]
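The filling-factor bookkeeping used just above to locate the SdH dips is simple arithmetic: nu = n_s h / (e B), and integer nu marks complete filling of nu levels. A short sketch with a made-up sheet density:

import numpy as np

E_CHARGE = 1.602176634e-19  # C
PLANCK = 6.62607015e-34  # J s

def filling_factor(n_s, b_field):
    # nu = n_s h / (e B): number of filled Landau levels at sheet density n_s.
    return n_s * PLANCK / (E_CHARGE * b_field)

def sdh_dip_fields(n_s, integer_fillings):
    # Fields where nu passes through integers: candidate SdH dips in the
    # positive-g case, where complete filling sits at density-of-states minima.
    return {nu: n_s * PLANCK / (E_CHARGE * nu) for nu in integer_fillings}

n_s = 5.0e15  # m^-2, illustrative
print(round(filling_factor(n_s, b_field=5.0), 2))
print({nu: round(b, 2) for nu, b in sdh_dip_fields(n_s, [4, 5, 6]).items()})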
Next we examine the density dependence of the linear magnetoresistivity. To compare with Abrikosov's quantum magnetoresistance, which suggests a @xmath207 behavior [xcite], we show the calculated @xmath208 for the above LMR versus the carrier sheet density @xmath33 in Fig. (rhon), at fixed magnetic field @xmath209 T. The mobility is taken to be @xmath210 and @xmath211 m@xmath212/Vs, respectively, to keep the resistivity in the LMR regime. A clearly linear dependence of @xmath213 on the surface density @xmath33 is seen in all cases, indicating that this non-saturating linear resistivity is almost inversely proportional to the carrier density. In the figure we also show @xmath208 versus @xmath33 under the condition of given conductivity, @xmath214 and @xmath215. In this case the half width @xmath216 is independent of the surface density, yet the linear dependence still holds, indicating that this linear behavior is not sensitive to the modest @xmath33-dependence of the Landau-level broadening @xmath216, as long as the system stays in the overlapped-Landau-level regime.

From the above discussion it is obvious that the LMR shows up in systems with overlapped Landau levels, and that the separation of the Landau levels makes the MR depart from the linear increase. At high temperature the thermal energy smears the level separation, and phonon scatterings further broaden the Landau levels; hence this LMR is expected to be robust against raising the temperature. This is indeed the case, as seen in Fig. (rhot), where we plot the calculated magnetoresistivity @xmath157 for the above system, with zero-temperature linear mobility @xmath217 m@xmath212/Vs, versus the magnetic field at different lattice temperatures. Raising the temperature up to room temperature has little effect on the linearity of the MR; owing to the mobility decrease caused by phonon scattering at higher temperature, the weak SdH oscillation on the linear background tends to vanish. These features are in good agreement with the experimental report [xcite].

In summary, we have studied two-dimensional magnetotransport in the flat surface of a three-dimensional TI, which arises from surface states with a wavevector-linear energy dispersion and a finite, positive Zeeman splitting within the bulk energy gap. When the level broadening is comparable to or larger than the Landau-level separation, so that the conduction electrons spread over many Landau levels, a positive, dominantly linear, and non-saturating magnetoresistance appears within a quite wide range of magnetic field and persists up to room temperature. This remarkable LMR provides a possible mechanism for the recently observed linear magnetoresistance in topological-insulator Bi@xmath0Se@xmath1 nanoribbons [xcite]. In contrast to the quantum Hall effect, which appears in the case of well-formed Landau levels, and to Abrikosov's quantum magnetotransport [xcite], which is limited to the extreme quantum limit in which all electrons coalesce into the lowest Landau level, the LMR discussed here is a phenomenon of purely classical two-dimensional magnetotransport in a system with linear energy dispersion, appearing in the regime of overlapped Landau levels, irrespective of its showing up in a relatively high magnetic-field range. Furthermore, the present scheme deals with the spatially uniform case, without invoking the mobility fluctuations of a strongly inhomogeneous system that the classical Parish-Littlewood model requires to produce an LMR [xcite]. The appearance of this significant, positively increasing linear magnetoresistance depends on the existence of a positive and sizable effective g-factor. If the Zeeman energy splitting is quite small, the resistivity @xmath157 would exhibit little change with changing magnetic field; in the case of a negative and sizable effective g-factor, the magnetoresistivity would decrease linearly with increasing magnetic field. Therefore the behavior of the longitudinal resistivity versus magnetic field may provide a useful way of judging the direction and the size of the effective Zeeman energy splitting in TI surface states.
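As a compact recap of the two scalings established in Figs. (rhob) and (rhon), namely rho_xx growing linearly with B at fixed density and falling roughly as 1/n_s at fixed field, the toy expression below folds both into rho_xx = alpha B / (n_s e). The coefficient alpha is made up and stands in for the detailed scattering physics; this is a scaling sketch, not the paper's formula.

import numpy as np

E_CHARGE = 1.602176634e-19  # C

def lmr_toy(b_field, n_s, alpha=0.3):
    # Toy linear magnetoresistivity (ohm) encoding only the scalings
    # rho ~ B and rho ~ 1 / n_s from the text.
    return alpha * b_field / (n_s * E_CHARGE)

b_fields = np.linspace(1.0, 10.0, 4)  # T
for n_s in (2.0e15, 4.0e15):  # m^-2: doubling the density halves rho
    print(n_s, np.round(lmr_toy(b_fields, n_s), 1))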
This work was supported by the National Science Foundation of China (Grant No. 11104002), the National Basic Research Program of China (Grant No. 2012CB927403), and the Program for Science & Technology Innovation Talents in Universities of Henan Province (Grant No. 2012HASTIT029).
import copy import tempfile import unittest from transformers import LEDConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, LEDTokenizer, ) from transformers.models.led.modeling_led import LEDDecoder, LEDEncoder def prepare_led_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class LEDModelTester: def __init__( self, parent, batch_size=13, seq_length=11, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.attention_window = attention_window self.encoder_key_length = self.attention_window + 2 self.encoder_seq_length = self.seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return LEDConfig( 
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, attention_window=self.attention_window, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() global_attention_mask = torch.zeros_like(inputs_dict["input_ids"]) global_attention_mask[:, -1] = 1 inputs_dict["global_attention_mask"] = global_attention_mask return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = LEDModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = LEDModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = LEDEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"], global_attention_mask=inputs_dict["global_attention_mask"], )[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = LEDDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - 
last_hidden_state).abs().max().item() < 1e-3) def check_global_attention(self, config, inputs_dict): model = LEDModel(config=config).to(torch_device).eval() model.config.output_attentions = True attention_mask = ids_tensor(inputs_dict["input_ids"].shape, vocab_size=2) global_attention_mask = torch.zeros_like(attention_mask) num_tokens_with_global_attention = 2 attention_mask[:, 2 : 2 + num_tokens_with_global_attention] = 1 global_attention_mask[:, 2 : 2 + num_tokens_with_global_attention] = 1 inputs_dict["attention_mask"] = attention_mask inputs_dict["global_attention_mask"] = global_attention_mask outputs = model(**inputs_dict) self.parent.assertIsNotNone(outputs.encoder_global_attentions) self.parent.assertTrue( outputs.encoder_global_attentions[0].shape, (self.batch_size, self.num_attention_heads, self.encoder_seq_length, num_tokens_with_global_attention), ) @require_torch class LEDModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (LEDModel, LEDForConditionalGeneration, LEDForSequenceClassification, LEDForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (LEDForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": LEDForConditionalGeneration, "feature-extraction": LEDModel, "question-answering": LEDForQuestionAnswering, "summarization": LEDForConditionalGeneration, "text-classification": LEDForSequenceClassification, "text2text-generation": LEDForConditionalGeneration, "translation": LEDForConditionalGeneration, "zero-shot": LEDForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False test_torchscript = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def setUp(self): self.model_tester = LEDModelTester(self) self.config_tester = ConfigTester(self, config_class=LEDConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_global_attention(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_global_attention(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (LEDModel, LEDForConditionalGeneration, LEDForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del 
inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = LEDForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_retain_grad_hidden_states_attentions(self): return def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = self.model_tester.seq_length encoder_seq_length = self.model_tester.encoder_seq_length encoder_key_length = self.model_tester.encoder_key_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) correct_outlen = 6 if "labels" in inputs_dict: correct_outlen += 1 if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, seq_length, seq_length, ], ) def assert_tensors_close(a, b, atol=1e-12, prefix=""): if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." 
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class LEDModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return LEDTokenizer.from_pretrained("allenai/led-base-16384") def test_inference_no_head(self): model = LEDModel.from_pretrained("allenai/led-base-16384").to(torch_device) input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict).last_hidden_state expected_shape = torch.Size((1, 1024, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").to(torch_device) input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict, use_cache=False).logits expected_shape = torch.Size((1, 1024, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): hf = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv").to(torch_device) tok = LEDTokenizer.from_pretrained("allenai/led-large-16384-arxiv") ARTICLE_LEP = r ARTICLE_MAGNET = r dct = tok.batch_encode_plus( [ARTICLE_LEP, ARTICLE_MAGNET], max_length=6144, padding="max_length", truncation=True, return_tensors="pt", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=4, max_length=512, early_stopping=True, no_repeat_ngram_size=3, ) EXPECTED_LEP = ( " the physics of @xmath0-boson will again play the central role in the frontier of particle physics if the" " gigaz option of the international linear collider ( ilc ) can be realized in its first phase. \n the" " expected sensitivity to the branching ratio of rare decays, especially its exotic or rare processes," " should be investigated comprehensively to evaluate their potential in probing new physics. in this work" " \n, we study the rare decay into light higgs boson(s ) in the framework of the minimal supersymmetric" " standard model ( mssm ), where a light cp - odd higgs - boson with singlet - dominant component may" " naturally arise from the spontaneous breaking of some approximate global symmetry. 
" ) EXPECTED_MAGNET = ( " the recent experiment in the surface states of the topological insulator bi@xmath0se @xmath1, however," " reported that a large positive magnetoresistance becomes very linear in perpendicular magnetic field" " even in an opposite situation where the carrier sheet density is high that all electrons occupy more" " than one landau levels. \n it is striking that this observation is in conflict with abrikosov s model" " and also with the classical parish - littlewood model. " ) generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == [EXPECTED_LEP, EXPECTED_MAGNET]
coding: utf-8. Copyright Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Notes from the test file: ModelTesterMixin.test_attention_outputs expects attention tensors of size (num_attention_heads, encoder_seq_length, encoder_key_length), but TFLongformerSelfAttention returns attention of shape (num_attention_heads, encoder_seq_length, self.attention_window + 1), because its local attention only attends to self.attention_window tokens plus one before and one after. Because of padding, encoder_seq_length differs from seq_length; this is relevant for the test_attention_outputs and test_hidden_states_output tests. The past-key-values check runs a first forward pass, creates a hypothetical next token to extend next_input_ids, appends it to the input_ids and selects a random slice, then tests that the outputs are equal for that slice. Further checks verify that output attentions can also be changed via the config, and that attention is always last and the order is fine. TODO: head masking is not yet implemented. The integration tests mark where to change the intended input and the expected output.
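The padding arithmetic mentioned in these notes can be stated on its own. A minimal sketch with our own helper names; the expressions mirror the ones used in the tester below:

def padded_encoder_seq_length(seq_length: int, attention_window: int) -> int:
    # LED/Longformer pad the input to a multiple of the attention window,
    # so the attention tensors report the padded length, not seq_length.
    pad = (attention_window - seq_length % attention_window) % attention_window
    return seq_length + pad

def local_attention_key_length(attention_window: int) -> int:
    # the window plus one token before and one after, as used by the tester
    return attention_window + 2

assert padded_encoder_seq_length(7, 4) == 8  # seq_length=7 pads up to 8
assert local_attention_key_length(4) == 6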
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class TFLEDModelTester: config_cls = LEDConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.attention_window = attention_window self.key_length = self.attention_window + 2 self.encoder_seq_length = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, ) inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids) global_attention_mask = tf.concat( [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, ) inputs_dict["global_attention_mask"] = global_attention_mask return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFLEDModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) next_input_ids = 
tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_led_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFLEDModelTester(self) self.config_tester = ConfigTester(self, config_class=LEDConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"]) num_global_attn_indices = 2 inputs_dict["global_attention_mask"] = tf.where( tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], ) config.return_dict = True seq_length = self.model_tester.seq_length encoder_seq_length = self.model_tester.encoder_seq_length def check_decoder_attentions_output(outputs): decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) def check_encoder_attentions_output(outputs): attentions = 
[t.numpy() for t in outputs.encoder_attentions] global_attentions = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) self.assertListEqual( list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["use_cache"] = False config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_saved_model_creation(self): pass def test_generate_with_headmasking(self): pass def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) TOLERANCE = 1e-4 @slow @require_tf class TFLEDModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 1024, 768) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3) def test_inference_with_head(self): model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384") input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]]) inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
coding: utf-8. Copyright 2022 HuggingFace Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import LevitImageProcessor class LevitImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class LevitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LevitImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = LevitImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
codingutf8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch levit model import unittest import warnings from math import ceil floor from transformers import levitconfig from transformers fileutils import cachedproperty istorchavailable isvisionavailable from transformers models auto import getvalues from transformers testingutils import requiretorch requirevision slow torchdevice from testconfigurationcommon import configtester from testmodelingcommon import modeltestermixin floatstensor idstensor from testpipelinemixin import pipelinetestermixin if istorchavailable import torch from transformers import modelforimageclassificationmapping modelmapping levitforimageclassification levitforimageclassificationwithteacher levitmodel from transformers models levit modelinglevit import levitpretrainedmodelarchivelist if isvisionavailable from pil import image from transformers import levitimageprocessor class levitconfigtesterconfigtester def createandtestconfigcommonpropertiesself config self configclassself inputsdict self parent asserttruehasattrconfig hiddensizes self parent asserttruehasattrconfig numattentionheads class levitmodeltester def init self parent batchsize13 imagesize64 numchannels3 kernelsize3 stride2 padding1 patchsize16 hiddensizes16 32 48 numattentionheads1 2 3 depths2 3 4 keydim8 8 8 droppathrate0 mlpratio2 2 2 attentionratio2 2 2 initializerrange0 02 istrainingtrue uselabelstrue numlabels2 check self parent parent self batchsize batchsize self imagesize imagesize self numchannels numchannels self kernelsize kernelsize self stride stride self padding padding self hiddensizes hiddensizes self numattentionheads numattentionheads self depths depths self keydim keydim self droppathrate droppathrate self patchsize patchsize self attentionratio attentionratio self mlpratio mlpratio self initializerrange initializerrange self downops subsample keydim0 hiddensizes0 keydim0 4 2 2 subsample keydim0 hiddensizes1 keydim0 4 2 2 self istraining istraining self uselabels uselabels self numlabels numlabels self initializerrange initializerrange def prepareconfigandinputsself pixelvalues floatstensorself batchsize self numchannels self imagesize self imagesize labels none if self uselabels labels idstensorself batchsize self numlabels config self getconfig return config pixelvalues labels def getconfigself return levitconfig imagesizeself imagesize numchannelsself numchannels kernelsizeself kernelsize strideself stride paddingself padding patchsizeself patchsize hiddensizesself hiddensizes numattentionheadsself numattentionheads depthsself depths keydimself keydim droppathrateself droppathrate mlpratioself mlpratio attentionratioself attentionratio initializerrangeself initializerrange downopsself downops def createandcheckmodelself config pixelvalues labels model levitmodelconfigconfig model totorchdevice model eval result modelpixelvalues imagesize self imagesize self imagesize height width imagesize0 imagesize1 for in range4 height floorheight 2 self padding self kernelsize self stride 1 
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LeViT model. """

# Here we also overwrite some of the tests of test_modeling_common.py, as LeViT does not use
# input_ids, inputs_embeds, attention_mask and seq_length.
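# A minimal, self-contained sketch of the spatial-size arithmetic the LeViT tests below rely on:
# the patch embedding stacks four stride-2 convolutions, each shrinking H and W via the standard
# convolution output-size formula. The values match LevitModelTester's defaults (image_size=64,
# kernel_size=3, stride=2, padding=1); this is an illustration, not part of the test suite.

from math import ceil, floor


def conv_output_size(size, kernel_size=3, stride=2, padding=1):
    # standard convolution output-size formula, as used in create_and_check_model below
    return floor((size + 2 * padding - kernel_size) / stride) + 1


size = 64
for _ in range(4):  # four stride-2 convolutions in the patch embedding
    size = conv_output_size(size)
print(size)  # 64 -> 32 -> 16 -> 8 -> 4
# the two "Subsample" stages then halve the token grid twice more, so the tester expects
# ceil(4 / 4) * ceil(4 / 4) = 1 token in last_hidden_state
print(ceil(size / 4) * ceil(size / 4))  # 1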
import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class LevitConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class LevitModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[16, 32, 48], num_attention_heads=[1, 2, 3], depths=[2, 3, 4], key_dim=[8, 8, 8], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.hidden_sizes = hidden_sizes self.num_attention_heads = num_attention_heads self.depths = depths self.key_dim = key_dim self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.attention_ratio = attention_ratio self.mlp_ratio = mlp_ratio self.initializer_range = initializer_range self.down_ops = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.initializer_range = initializer_range def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def create_and_check_model(self, config, pixel_values, labels): model = LevitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for _ in range(4): height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) 
self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = LevitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = LevitModelTester(self) self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="Levit does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Levit does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="Levit does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depths) + 1 self.assertEqual(len(hidden_states), expected_num_layers) image_size = (self.model_tester.image_size, self.model_tester.image_size) height, width = image_size[0], image_size[1] for _ in range(4): height = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) width = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ height * width, self.model_tester.hidden_sizes[0], ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): 
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if ( model_class in get_values(MODEL_MAPPING) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LevitModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class LevitModelIntegrationTest(unittest.TestCase): @cached_property def 
default_image_processor(self): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
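# The integration test above mirrors end-user inference. A short usage sketch via the pipeline
# API ("facebook/levit-128S" is assumed here as a representative LeViT image-classification
# checkpoint; any LeViT checkpoint should work):

from transformers import pipeline

classifier = pipeline("image-classification", model="facebook/levit-128S")
predictions = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(predictions[0]["label"], predictions[0]["score"])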
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
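# The LiltModelTester below draws random boxes and then "legalizes" them so that x0 <= x1 and
# y0 <= y1, since LiLT expects boxes in (x0, y0, x1, y1) order. A minimal, vectorized sketch of
# that same normalization (standalone, for illustration only):

import torch


def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    # bbox has shape (batch, seq_len, 4); swap coordinate pairs that are out of order
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)


boxes = torch.randint(0, 1000, (2, 4, 4))
legal = legalize_bboxes(boxes)
assert (legal[..., 2] >= legal[..., 0]).all()
assert (legal[..., 3] >= legal[..., 1]).all()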
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class LiltModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def get_config(self): return LiltConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, bbox, token_type_ids, 
input_mask, sequence_labels, token_labels, ): model = LiltModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ): config.num_labels = self.num_labels model = LiltForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ): model = LiltForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = LiltModelTester(self) self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        # verify both the shape and the values of the output
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
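# Several tests below hinge on how the (slow) LLaMA tokenizer handles SentencePiece's dummy
# prefix space when legacy=False: a "▁" is prepended at the start of raw text but is not
# re-inserted after special tokens. A minimal sketch (uses the same "huggyllama/llama-7b"
# checkpoint as the tests themselves; requires network access):

from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)
print(tok.tokenize("Hello"))   # ['▁Hello'] -- dummy prefix space added at the start of the text
print(tok.tokenize("<s>how"))  # ['<s>', 'how'] -- no prefix space re-added after the special token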
import os import pickle import shutil import tempfile import unittest from datasets import load_dataset from transformers import ( SPIECE_UNDERLINE, AddedToken, LlamaTokenizer, LlamaTokenizerFast, is_torch_available, ) from transformers.convert_slow_tokenizer import convert_slow_tokenizer from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_jinja, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): pass @require_sentencepiece @require_tokenizers class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LlamaTokenizer rust_tokenizer_class = LlamaTokenizerFast test_rust_tokenizer = False test_sentencepiece = True from_pretrained_kwargs = {} def setUp(self): super().setUp() tokenizer = LlamaTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.pad_token = tokenizer.eos_token tokenizer.save_pretrained(self.tmpdirname) def get_tokenizers(self, **kwargs): kwargs.update({"pad_token": "<PAD>"}) return super().get_tokenizers(**kwargs) def test_full_tokenizer(self): tokenizer = LlamaTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @unittest.skip("Let's wait for the fast tokenizer!") def test_save_pretrained(self): self.tokenizers_list += (self.rust_tokenizer_class, "hf-internal-testing/llama-tokenizer", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = 
tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @require_torch def test_batch_tokenization(self): if not self.test_seq2seq: return tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] try: batch = tokenizer( text=text, max_length=3, max_target_length=10, return_tensors="pt", ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1], 3) batch = tokenizer(text, max_length=3, return_tensors="pt") self.assertEqual(batch.input_ids.shape[1], 3) batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt") self.assertEqual(batch_encoder_only.input_ids.shape[1], 3) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.") def test_save_slow_from_fast_and_reload_fast(self): pass def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 
29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="hf-internal-testing/llama-tokenizer", revision="0984d03108b1a041ed679bd253b6519b7e1a4778", padding=False, ) def test_picklable(self): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SAMPLE_VOCAB, f.name) tokenizer = LlamaTokenizer(f.name, keep_accents=True) pickled_tokenizer = pickle.dumps(tokenizer) pickle.loads(pickled_tokenizer) @unittest.skip("worker 'gw4' crashed on CI, passing locally.") def test_pickle_subword_regularization_tokenizer(self): pass @unittest.skip("worker 'gw4' crashed on CI, passing locally.") def test_subword_regularization_tokenizer(self): pass @require_torch @require_sentencepiece @require_tokenizers class LlamaIntegrationTest(unittest.TestCase): @classmethod def setUpClass(cls): checkpoint_name = "hf-internal-testing/llama-tokenizer-non-normalized" cls.tokenizer: LlamaTokenizer = LlamaTokenizer.from_pretrained(checkpoint_name) cls.rust_tokenizer = LlamaTokenizerFast.from_pretrained(checkpoint_name) return cls @require_torch def integration_tests(self): inputs = self.tokenizer( ["The following string should be properly encoded: Hello.", "But ird and ปี ird ด"], return_tensors="pt", ) self.assertEqual( nested_simplify(inputs), { "input_ids": [ [1, 450, 1494, 1347, 881, 367, 6284, 18511, 29901, 15043, 29889], [1, 1205, 29871, 1823, 322, 29871, 31010, 30691, 1678, 1823, 1678, 30718], ], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], }, ) def test_fast_special_tokens(self): slow_tokenizer = self.tokenizer fast_tokenizer = self.rust_tokenizer slow = slow_tokenizer.encode("A sample test", add_special_tokens=True) assert slow == [1, 319, 4559, 1243] fast_tokenizer.add_eos_token = False fast = fast_tokenizer.encode("A sample test", add_special_tokens=True) assert fast == [1, 319, 4559, 1243] fast_tokenizer.add_eos_token = True fast = fast_tokenizer.encode("A sample test", add_special_tokens=True) assert fast == [1, 319, 4559, 1243, 2] slow_tokenizer.add_eos_token = True slow = slow_tokenizer.encode("A sample test", add_special_tokens=True) assert slow == [1, 319, 4559, 1243, 2] fast_tokenizer = LlamaTokenizerFast.from_pretrained( "hf-internal-testing/llama-tokenizer", add_eos_token=True, add_bos_token=False ) fast = 
fast_tokenizer.encode("A sample test", add_special_tokens=True) assert fast == [319, 4559, 1243, 2] slow_tokenzier = LlamaTokenizer.from_pretrained( "hf-internal-testing/llama-tokenizer", add_eos_token=True, add_bos_token=False ) slow = slow_tokenzier.encode("A sample test", add_special_tokens=True) assert slow == [319, 4559, 1243, 2] self.tokenizer.add_eos_token = False self.rust_tokenizer.add_eos_token = False @slow def test_conversion(self): self.rust_tokenizer.save_pretrained("./out") with tempfile.TemporaryDirectory() as dirname: self.rust_tokenizer.save_pretrained(dirname) with open(os.path.join(dirname, "tokenizer.json"), "r") as f: old_serialized = f.read() new_tokenizer = convert_slow_tokenizer(self.tokenizer) with tempfile.NamedTemporaryFile() as f: new_tokenizer.save(f.name) new_serialized = open(f.name, "r").read() with open("out_tokenizer.json", "w") as g: g.write(new_serialized) self.assertEqual(old_serialized, new_serialized) def test_simple_encode_decode(self): pyth_tokenizer = self.tokenizer rust_tokenizer = self.rust_tokenizer self.assertEqual(pyth_tokenizer.encode("This is a test"), [1, 910, 338, 263, 1243]) self.assertEqual(rust_tokenizer.encode("This is a test"), [1, 910, 338, 263, 1243]) self.assertEqual(pyth_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test") self.assertEqual(rust_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test") self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392]) self.assertEqual( pyth_tokenizer.decode( [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True ), "生活的真谛是", ) self.assertEqual( rust_tokenizer.decode( [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True ), "生活的真谛是", ) self.assertEqual(pyth_tokenizer.encode("Hi Hello"), [1, 6324, 29871, 15043]) self.assertEqual(rust_tokenizer.encode("Hi Hello"), [1, 6324, 29871, 15043]) self.assertEqual(pyth_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), "Hi Hello") self.assertEqual(pyth_tokenizer.encode("Hi Hello"), [1, 6324, 259, 15043]) self.assertEqual(rust_tokenizer.encode("Hi Hello"), [1, 6324, 259, 15043]) self.assertEqual(pyth_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), "Hi Hello") self.assertEqual(pyth_tokenizer.encode(""), [1]) self.assertEqual(rust_tokenizer.encode(""), [1]) self.assertEqual(pyth_tokenizer.encode(" "), [1, 259]) self.assertEqual(rust_tokenizer.encode(" "), [1, 259]) self.assertEqual(pyth_tokenizer.encode(" "), [1, 1678]) self.assertEqual(rust_tokenizer.encode(" "), [1, 1678]) self.assertEqual(pyth_tokenizer.encode(" Hello"), [1, 29871, 15043]) self.assertEqual(rust_tokenizer.encode(" Hello"), [1, 29871, 15043]) def test_no_differences_showcase(self): pyth_tokenizer = self.tokenizer rust_tokenizer = self.rust_tokenizer self.assertEqual(pyth_tokenizer.encode(""), [1]) self.assertEqual(rust_tokenizer.encode(""), [1]) self.assertEqual(pyth_tokenizer.encode(" "), [1, 259]) self.assertEqual(rust_tokenizer.encode(" "), [1, 259]) self.assertEqual(pyth_tokenizer.encode(" "), [1, 1678]) self.assertEqual(rust_tokenizer.encode(" "), [1, 
1678]) self.assertEqual(pyth_tokenizer.encode(" Hello"), [1, 29871, 15043]) self.assertEqual(rust_tokenizer.encode(" Hello"), [1, 29871, 15043]) self.assertEqual(pyth_tokenizer.encode("<s>"), [1, 1]) self.assertEqual(rust_tokenizer.encode("<s>"), [1, 1]) def test_no_differences_decode(self): pyth_tokenizer = self.tokenizer rust_tokenizer = self.rust_tokenizer self.assertEqual(pyth_tokenizer.decode([869]), ".") self.assertEqual(rust_tokenizer.decode([869]), ".") self.assertEqual(pyth_tokenizer.decode([30112, 869]), "ا .") self.assertEqual(rust_tokenizer.decode([30112, 869]), "ا .") def test_no_differences_special_tokens(self): pyth_tokenizer = self.tokenizer rust_tokenizer = self.rust_tokenizer self.assertEqual(pyth_tokenizer.encode(""), [1]) self.assertEqual(rust_tokenizer.encode(""), [1]) self.assertEqual(pyth_tokenizer.encode("<s>"), [1, 1]) self.assertEqual(rust_tokenizer.encode("<s>"), [1, 1]) @unittest.skipIf( os.getenv("RUN_TOKENIZER_INTEGRATION", "0") == "0", "RUN_TOKENIZER_INTEGRATION=1 to run tokenizer integration tests", ) def test_integration_test_xnli(self): import tqdm pyth_tokenizer = self.tokenizer rust_tokenizer = self.rust_tokenizer dataset = load_dataset("code_x_glue_ct_code_to_text", "go") for item in tqdm.tqdm(dataset["validation"]): string = item["code"] encoded1 = pyth_tokenizer.encode(string) encoded2 = rust_tokenizer.encode(string) self.assertEqual(encoded1, encoded2) decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True) decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True) self.assertEqual(decoded1, decoded2) dataset = load_dataset("xnli", "all_languages") for item in tqdm.tqdm(dataset["train"]): for string in item["premise"].values(): encoded1 = pyth_tokenizer.encode(string) encoded2 = rust_tokenizer.encode(string) self.assertEqual(encoded1, encoded2) decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True) decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True) self.assertEqual(decoded1, decoded2) def test_special_token_special_word(self): tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False) tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)], special_tokens=False) out1 = tokenizer.decode( tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False ) self.assertEqual(out1, "<REPR_END>inform") out2 = tokenizer.decode( tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=True ) self.assertEqual(out2, "<REPR_END> inform") input_ids = tokenizer.encode("<REPR_END>inform", add_special_tokens=False) self.assertEqual(input_ids, [29871, 32000, 262, 689]) out2 = tokenizer.decode( tokenizer.encode(" <REPR_END> inform", add_special_tokens=False), spaces_between_special_tokens=False ) self.assertEqual(out2, "<REPR_END>inform") input_ids = tokenizer.encode("<s> Hello<s>how", add_special_tokens=False) self.assertEqual(input_ids, [1, 15043, 1, 3525]) tokens = tokenizer.tokenize("<s> Hello<s>how", add_special_tokens=False) self.assertEqual(tokens, ["<s>", "▁Hello", "<s>", "how"]) decoded_tokens = tokenizer.decode(input_ids) self.assertEqual(decoded_tokens, "<s> Hello<s>how") input_ids = tokenizer.encode(" <s> Hello<s> how", add_special_tokens=False) self.assertEqual(input_ids, [259, 1, 15043, 1, 920]) tokens = tokenizer.tokenize(" <s> Hello<s> how", add_special_tokens=False) self.assertEqual(tokens, ["▁▁", "<s>", "▁Hello", "<s>", "▁how"]) decoded_tokens = tokenizer.decode(input_ids) 
self.assertEqual(decoded_tokens, " <s> Hello<s> how") def test_some_edge_cases(self): tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False) sp_tokens = tokenizer.sp_model.encode("<s>>", out_type=str) self.assertEqual(sp_tokens, ["<", "s", ">>"]) tokens = tokenizer.tokenize("<s>>") self.assertNotEqual(sp_tokens, tokens) self.assertEqual(tokens, ["<s>", ">"]) tokens = tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str)) tokens = tokenizer.tokenize(" ") self.assertEqual(tokens, ["▁▁"]) self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str)) tokens = tokenizer.tokenize("▁") self.assertEqual(tokens, ["▁▁"]) self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁", out_type=str)) tokens = tokenizer.tokenize(" ▁") self.assertEqual(tokens, ["▁▁▁"]) self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁▁", out_type=str)) def test_fast_post_processor(self): tokenizer = LlamaTokenizerFast( SAMPLE_VOCAB, eos_token=None, bos_token=None, add_bos_token=False, add_eos_token=False ) tokenizer.encode(" Hey ") with self.assertRaises(ValueError): tokenizer = LlamaTokenizerFast( SAMPLE_VOCAB, bos_token=None, eos_token="<s>", add_bos_token=True, add_eos_token=False ) with self.assertRaises(ValueError): tokenizer = LlamaTokenizerFast(SAMPLE_VOCAB, eos_token=None, add_bos_token=True, add_eos_token=True) @require_jinja def test_tokenization_for_chat(self): tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False) test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [1, 29961, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 13563, 7451, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10994, 29991, 518, 29914, 25580, 29962], [1, 29961, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 13563, 7451, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10994, 29991, 518, 29914, 25580, 29962, 20103, 304, 5870, 366, 29889, 29871, 2], [1, 29961, 25580, 29962, 15043, 29991, 518, 29914, 25580, 29962] ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): @classmethod def setUpClass(cls): tokenizer = LlamaTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False) tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]}) cls.tokenizer = tokenizer return cls def test_add_dummy_prefix(self): input_ids = self.tokenizer.encode(". Hello") self.assertEqual(input_ids, [7, 4, 156, 86, 20]) sp_encode = self.tokenizer.sp_model.encode(". Hello") self.assertEqual(input_ids, [7] + sp_encode) tokens = self.tokenizer.tokenize(". 
Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): input_ids = self.tokenizer.encode(" . Hello") self.assertEqual(input_ids, [7, 4, 156, 86, 20]) sp_encode = self.tokenizer.sp_model.encode(" . Hello") self.assertEqual(input_ids, [7] + sp_encode) tokens = self.tokenizer.tokenize(" . Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [156, 46, 44]) tokens = self.tokenizer.tokenize("▁He is not") sp_encode = [ self.tokenizer.sp_model.piece_to_id("▁He"), self.tokenizer.sp_model.piece_to_id("▁is"), self.tokenizer.sp_model.piece_to_id("▁not"), ] self.assertEqual(input_ids, sp_encode) self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) input_ids = self.tokenizer.encode("▁He is not<s> ▁He") self.assertEqual(input_ids, [156, 46, 44, 1, 156]) tokens = self.tokenizer.tokenize("▁He is not<s> ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"]) input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [156, 46, 44, 156]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) def test_character_after_special_token(self): input_ids = self.tokenizer.encode("Hey <s>I") self.assertEqual(input_ids, [156, 30, 1, 100]) sp_encode = self.tokenizer.sp_model.encode("Hey .I") self.assertEqual(input_ids[-1], sp_encode[-1]) tokens = self.tokenizer.tokenize("<s>I") self.assertEqual(tokens, ["<s>", "I"]) input_ids = self.tokenizer.encode("Hello, <s>,") self.assertEqual(input_ids, [156, 86, 20, 3, 1, 3]) tokens = self.tokenizer.tokenize("Hello, <s>,") self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","]) def test_special_tokens_strip(self): input_ids = self.tokenizer.encode(" <s> ,") self.assertEqual(input_ids, [1, 7, 3]) tokens = self.tokenizer.tokenize(" <s> ,") self.assertEqual(tokens, ["<s>", "▁", ","]) input_ids = self.tokenizer.encode("No <s> ▁He") self.assertEqual(input_ids, [284, 1, 156]) tokens = self.tokenizer.tokenize("No <s> ▁He") self.assertEqual(tokens, ["▁No", "<s>", "▁He"])
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ModelTesterMixin.test_attention_outputs expects attention tensors of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
# returns attentions of shape [num_attention_heads, encoder_seq_length, attention_window + 1],
# because its local attention only attends to attention_window + 1 locations (assuming no token
# has global attention; otherwise the last dimension is x + attention_window + 1, where x is the
# number of tokens with global attention).
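# The note above describes Longformer's two attention modes: sliding-window local attention and
# optional global attention on selected tokens. A minimal usage sketch of building a global
# attention mask (assumes the "allenai/longformer-base-4096" checkpoint these tests target):

import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

inputs = tokenizer("Hello world! " * 100, return_tensors="pt")
# 0 = local (sliding-window) attention, 1 = global attention; here only the first token is global
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

with torch.no_grad():
    outputs = model(**inputs, global_attention_mask=global_attention_mask)
print(outputs.last_hidden_state.shape)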
import unittest from transformers import LongformerConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerSelfAttention, ) class LongformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.attention_window = attention_window self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def 
create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] output_without_mask = model(input_ids)["last_hidden_state"] self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4)) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() global_attention_mask = input_mask.clone() global_attention_mask[:, input_mask.shape[-1] // 2] = 0 global_attention_mask = global_attention_mask.to(torch_device) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, global_attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = LongformerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs global_attention_mask = torch.zeros_like(input_ids) global_attention_mask[:, -1] = 1 inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item() input_ids[:, -3:] = config.sep_token_id input_mask = torch.ones_like(input_ids) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_torch class LongformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False all_model_classes = ( ( LongformerModel, LongformerForMaskedLM, LongformerForSequenceClassification, LongformerForQuestionAnswering, LongformerForTokenClassification, LongformerForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LongformerModel, "fill-mask": LongformerForMaskedLM, "question-answering": LongformerForQuestionAnswering, "text-classification": LongformerForSequenceClassification, "token-classification": LongformerForTokenClassification, "zero-shot": LongformerForSequenceClassification, } if is_torch_available() else {} ) model_split_percents = [0.6, 0.7, 0.9] def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def setUp(self): self.model_tester = LongformerModelTester(self) self.config_tester = ConfigTester(self,
config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return @require_torch @require_sentencepiece @require_tokenizers class LongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return torch.tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=torch.float32, device=torch_device, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = hidden_states.reshape((1, 8, 4)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = chunked_hidden_states.shape[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1) self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3)) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, 0, 4:], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3)) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, -1, :3], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(hidden_states.shape, (1, 4, 8)) padding 
= (0, 0, 0, 1) padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding) self.assertEqual(padded_hidden_states.shape, (1, 8, 5)) expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32) self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6)) self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6)) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) expected_slice_along_seq_length = torch.tensor( [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32 ) expected_slice_along_chunk = torch.tensor( [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32 ) self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3)) self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3)) self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4)) def test_mask_invalid_locations(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1) self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8) hid_states_2 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2) self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24) hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3] LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2) self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24) hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :] LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2) self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12) def test_layer_local_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[:, -2:] = -10000 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (1, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 1], torch.tensor( [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_global_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], 
dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 2], torch.tensor( [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( output_hidden_states[1, -2], torch.tensor( [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_attn_probs(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states, local_attentions, global_attentions = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=True, ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0)) self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0)) self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( local_attentions[0, 0, 0, :], torch.tensor( [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( local_attentions[1, 0, 0, :], torch.tensor( [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( global_attentions[0, 0, 1, :], torch.tensor( [0.2500, 0.2500, 0.2500, 0.2500], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( global_attentions[1, 0, 0, :], torch.tensor( [0.2497, 0.2500, 0.2499, 0.2504], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) @slow def test_inference_no_head(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, 
device=torch_device) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device) self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4)) self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4)) @slow def test_inference_no_head_long(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask[:, [1, 4, 21]] = 1 output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = torch.tensor(74585.8594, device=torch_device) expected_output_mean = torch.tensor(0.0243, device=torch_device) self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4)) self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4)) @slow def test_inference_masked_lm_long(self): model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) input_ids = input_ids.to(torch_device) loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple() expected_loss = torch.tensor(0.0074, device=torch_device) expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device) expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device) self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4)) self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
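The slow tests above drive Longformer over a roughly 4000-token input built from hard-coded token ids and put global attention on positions 1, 4 and 21. As a minimal, self-contained sketch of the same pattern for readers who want to reproduce it outside the test harness (the checkpoint is the one the tests use; the sentence, the tokenizer call, and the single global position are illustrative assumptions, not taken from the test file):

import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.eval()

# A long input, mirroring the "hello world repeated 1000 times" inputs in the tests.
inputs = tokenizer("Hello world! " * 1000, return_tensors="pt", truncation=True, max_length=4096)

# 0 = local (sliding-window) attention, 1 = global attention. Putting global
# attention on the first token is a common choice for classification-style
# tasks; the tests instead mark positions 1, 4 and 21.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

with torch.no_grad():
    outputs = model(**inputs, global_attention_mask=global_attention_mask)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)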
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( LongformerConfig, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerSelfAttention, ) from transformers.tf_utils import shape_list class TFLongformerModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.attention_window = 4 self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFLongformerModel(config=config) attention_mask = tf.ones(input_ids.shape, dtype=tf.int64) output_with_mask = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] tf.debugging.assert_near(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], rtol=1e-4) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerModel(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = 
model(input_ids) self.parent.assertListEqual( shape_list(result.last_hidden_state), [self.batch_size, self.seq_length, self.hidden_size] ) self.parent.assertListEqual(shape_list(result.pooler_output), [self.batch_size, self.hidden_size]) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerModel(config=config) half_input_mask_length = shape_list(input_mask)[-1] // 2 global_attention_mask = tf.concat( [ tf.zeros_like(input_mask)[:, :half_input_mask_length], tf.ones_like(input_mask)[:, half_input_mask_length:], ], axis=-1, ) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertListEqual( shape_list(result.last_hidden_state), [self.batch_size, self.seq_length, self.hidden_size] ) self.parent.assertListEqual(shape_list(result.pooler_output), [self.batch_size, self.hidden_size]) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerForMaskedLM(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertListEqual(shape_list(result.logits), [self.batch_size, self.seq_length, self.vocab_size]) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerForQuestionAnswering(config=config) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertListEqual(shape_list(result.start_logits), [self.batch_size, self.seq_length]) self.parent.assertListEqual(shape_list(result.end_logits), [self.batch_size, self.seq_length]) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLongformerForSequenceClassification(config=config) output = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels ).logits self.parent.assertListEqual(shape_list(output), [self.batch_size, self.num_labels]) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLongformerForTokenClassification(config=config) output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels).logits self.parent.assertListEqual(shape_list(output), [self.batch_size, self.seq_length, self.num_labels]) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFLongformerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) 
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) output = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ).logits self.parent.assertListEqual(list(output.shape), [self.batch_size, self.num_choices]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs global_attention_mask = tf.concat( [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, ) inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs input_ids = tf.where(input_ids == config.sep_token_id, 0, input_ids) input_ids = tf.concat([input_ids[:, :-3], tf.ones_like(input_ids)[:, -3:] * config.sep_token_id], axis=-1) input_mask = tf.ones_like(input_ids) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_tf class TFLongformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFLongformerModel, TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForMultipleChoice, TFLongformerForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFLongformerModel, "fill-mask": TFLongformerForMaskedLM, "question-answering": TFLongformerForQuestionAnswering, "text-classification": TFLongformerForSequenceClassification, "token-classification": TFLongformerForTokenClassification, "zero-shot": TFLongformerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def setUp(self): self.model_tester = TFLongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() 
self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @unittest.skip("Longformer keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_saved_model_creation(self): pass @unittest.skip("Longformer keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_compile_tf_model(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFLongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return tf.convert_to_tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=tf.float32, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = tf.reshape(hidden_states, (1, 8, 4)) chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = shape_list(chunked_hidden_states)[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = TFLongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue( shape_list(padded_hidden_states)[-1] == shape_list(chunked_hidden_states)[-1] + window_overlap_size - 1 ) tf.debugging.assert_near(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3) tf.debugging.assert_near(padded_hidden_states[0, 0, 0, 4:], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) tf.debugging.assert_near(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3) tf.debugging.assert_near(padded_hidden_states[0, 0, -1, :3], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(shape_list(hidden_states), [1, 4, 8]) paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]], dtype=tf.int64) hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) padded_hidden_states = TFLongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, paddings) self.assertTrue(shape_list(padded_hidden_states) == [1, 1, 8, 5]) expected_added_dim = tf.zeros((5,), dtype=tf.float32) tf.debugging.assert_near(expected_added_dim, padded_hidden_states[0, 0, -1, :], rtol=1e-6) tf.debugging.assert_near( hidden_states[0, 0, -1, :], tf.reshape(padded_hidden_states, (1, -1))[0, 24:32], rtol=1e-6 ) def test_mask_invalid_locations(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, 
hidden_size)) hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states, 1) hid_states_2 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states, 2) hid_states_3 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, :, :3], 2) hid_states_4 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, 2:, :], 2) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_1), tf.int64)) == 8) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_2), tf.int64)) == 24) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_3), tf.int64)) == 24) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_4), tf.int64)) == 12) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, hidden_size)) chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) expected_slice_along_seq_length = tf.convert_to_tensor([0.4983, -0.7584, -1.6944], dtype=tf.float32) expected_slice_along_chunk = tf.convert_to_tensor([0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.float32) self.assertTrue(shape_list(chunked_hidden_states) == [1, 3, 4, 4]) tf.debugging.assert_near( chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-4 ) tf.debugging.assert_near(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-4) def test_layer_local_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.shape attention_mask = tf.zeros((batch_size, seq_length), dtype=tf.float32) is_index_global_attn = tf.math.greater(attention_mask, 1) is_global_attn = tf.math.reduce_any(is_index_global_attn) attention_mask = tf.where(tf.range(4)[None, :, None, None] > 1, -10000.0, attention_mask[:, :, None, None]) is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) layer_head_mask = None output_hidden_states = layer( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn] )[0] expected_slice = tf.convert_to_tensor( [0.00188, 0.012196, -0.017051, -0.025571, -0.02996, 0.017297, -0.011521, 0.004848], dtype=tf.float32 ) self.assertEqual(output_hidden_states.shape, (1, 4, 8)) tf.debugging.assert_near(output_hidden_states[0, 1], expected_slice, rtol=1e-3, atol=1e-4) def test_layer_global_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention hidden_states = self._get_hidden_states() hidden_states = tf.concat([self._get_hidden_states(), self._get_hidden_states() - 0.5], axis=0) batch_size, seq_length, hidden_size = hidden_states.shape attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) attention_mask_2 = tf.where(tf.range(4)[None, :, None, None] > 0, 10000.0, attention_mask_2) attention_mask = tf.concat([attention_mask_1, attention_mask_2], 
axis=0) is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0) is_global_attn = tf.math.reduce_any(is_index_global_attn) layer_head_mask = None output_hidden_states = layer( [ hidden_states, -tf.math.abs(attention_mask), layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ] )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) expected_slice_0 = tf.convert_to_tensor( [-0.06508, -0.039306, 0.030934, -0.03417, -0.00656, -0.01553, -0.02088, -0.04938], dtype=tf.float32 ) expected_slice_1 = tf.convert_to_tensor( [-0.04055, -0.038399, 0.0396, -0.03735, -0.03415, 0.01357, 0.00145, -0.05709], dtype=tf.float32 ) tf.debugging.assert_near(output_hidden_states[0, 2], expected_slice_0, rtol=1e-3, atol=1e-4) tf.debugging.assert_near(output_hidden_states[1, -2], expected_slice_1, rtol=1e-3, atol=1e-4) def test_layer_attn_probs(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention hidden_states = tf.concat([self._get_hidden_states(), self._get_hidden_states() - 0.5], axis=0) batch_size, seq_length, hidden_size = hidden_states.shape attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) attention_mask_2 = tf.where(tf.range(4)[None, :, None, None] > 0, 10000.0, attention_mask_2) attention_mask = tf.concat([attention_mask_1, attention_mask_2], axis=0) is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0) is_global_attn = tf.math.reduce_any(is_index_global_attn) layer_head_mask = None output_hidden_states, local_attentions, global_attentions = layer( [ hidden_states, -tf.math.abs(attention_mask), layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ] ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) self.assertTrue((local_attentions[0, 2:4, :, :] == 0).numpy().tolist()) self.assertTrue((local_attentions[1, 1:4, :, :] == 0).numpy().tolist()) self.assertTrue( (tf.math.abs(tf.math.reduce_sum(global_attentions[0, :, :2, :], axis=-1) - 1) < 1e-6).numpy().tolist() ) self.assertTrue( (tf.math.abs(tf.math.reduce_sum(global_attentions[1, :, :1, :], axis=-1) - 1) < 1e-6).numpy().tolist() ) tf.debugging.assert_near( local_attentions[0, 0, 0, :], tf.convert_to_tensor([0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) tf.debugging.assert_near( local_attentions[1, 0, 0, :], tf.convert_to_tensor([0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) self.assertTrue((tf.math.abs(tf.math.reduce_sum(global_attentions, axis=-1) - 1) < 1e-6).numpy().tolist()) tf.debugging.assert_near( global_attentions[0, 0, 1, :], tf.convert_to_tensor([0.2500, 0.2500, 0.2500, 0.2500], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) tf.debugging.assert_near( global_attentions[1, 0, 0, :], tf.convert_to_tensor([0.2497, 0.2500, 0.2499, 0.2504], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) @slow def test_inference_no_head(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") input_ids = 
tf.convert_to_tensor([[0, 20920, 232, 328, 1437, 2]], dtype=tf.int64) attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = tf.convert_to_tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], dtype=tf.float32) tf.debugging.assert_near(output[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) tf.debugging.assert_near(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) @slow def test_inference_no_head_long(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) global_attention_mask = tf.zeros(shape_list(input_ids), dtype=tf.int64) global_attention_mask = tf.tensor_scatter_nd_update( global_attention_mask, tf.constant([[0, 1], [0, 4], [0, 21]], dtype=tf.int64), tf.constant([1, 1, 1], dtype=tf.int64), ) output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = tf.constant(74585.875) expected_output_mean = tf.constant(0.024267) tf.debugging.assert_near(tf.reduce_sum(output), expected_output_sum, rtol=1e-4, atol=1e-4) tf.debugging.assert_near(tf.reduce_mean(output), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_masked_lm_long(self): model = TFLongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) output = model(input_ids, labels=input_ids) loss = output.loss prediction_scores = output.logits expected_loss = tf.constant(0.0073798) expected_prediction_scores_sum = tf.constant(-610476600.0) expected_prediction_scores_mean = tf.constant(-3.03477) tf.debugging.assert_near(tf.reduce_mean(loss), expected_loss, rtol=1e-4, atol=1e-4) tf.debugging.assert_near( tf.reduce_sum(prediction_scores), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4 ) tf.debugging.assert_near( tf.reduce_mean(prediction_scores), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4 ) @slow def test_inference_masked_lm(self): model = TFLongformerForMaskedLM.from_pretrained("lysandre/tiny-longformer-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 10] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.04926379, 0.0367098, 0.02099686], [0.03940692, 0.01547744, -0.01448723], [0.03495252, -0.05900355, -0.01675752], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
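The LUKE tests that follow build entity-level tensors (entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids) by hand with ids_tensor. In normal use these come from LukeTokenizer, whose entity_spans argument takes character-level spans of the entity mentions; a minimal sketch of that flow is below, with the checkpoint name and example sentence taken from the LUKE documentation rather than from this test file.

import torch
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")
model.eval()

text = "Beyoncé lives in Los Angeles."
# Character-level spans of the two entity mentions in `text`.
entity_spans = [(0, 7), (17, 28)]
inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)         # word-level representations
print(outputs.entity_last_hidden_state.shape)  # one vector per entity span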
"""Testing suite for the PyTorch LUKE model."""

import unittest

from transformers import LukeConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LukeForEntityClassification,
        LukeForEntityPairClassification,
        LukeForEntitySpanClassification,
        LukeForMaskedLM,
        LukeForMultipleChoice,
        LukeForQuestionAnswering,
        LukeForSequenceClassification,
        LukeForTokenClassification,
        LukeModel,
        LukeTokenizer,
    )
    from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST


class LukeModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        entity_length=3,
        mention_length=5,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_entity_ids=True,
        use_entity_attention_mask=True,
        use_entity_token_type_ids=True,
        use_entity_position_ids=True,
        use_labels=True,
        vocab_size=99,
        entity_vocab_size=10,
        entity_emb_size=6,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        num_entity_classification_labels=9,
        num_entity_pair_classification_labels=6,
        num_entity_span_classification_labels=4,
        use_entity_aware_attention=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.entity_length = entity_length
        self.mention_length = mention_length
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_entity_ids = use_entity_ids
        self.use_entity_attention_mask = use_entity_attention_mask
        self.use_entity_token_type_ids = use_entity_token_type_ids
        self.use_entity_position_ids = use_entity_position_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.entity_emb_size = entity_emb_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.num_entity_classification_labels = num_entity_classification_labels
        self.num_entity_pair_classification_labels = num_entity_pair_classification_labels
        self.num_entity_span_classification_labels = num_entity_span_classification_labels
        self.scope = scope
        self.use_entity_aware_attention = use_entity_aware_attention

        self.encoder_seq_length = seq_length
        self.key_length = seq_length
        self.num_hidden_states_types = 2  # hidden_states and entity_hidden_states

    def prepare_config_and_inputs(self):
        # Prepare words
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        # Prepare entities
        entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)

        entity_attention_mask = None
        if self.use_entity_attention_mask:
            entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length])

        entity_token_type_ids = None
        if self.use_token_type_ids:
            entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size)

        entity_position_ids = None
        if self.use_entity_position_ids:
            entity_position_ids = ids_tensor(
                [self.batch_size, self.entity_length, self.mention_length], self.mention_length
            )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        entity_labels = None
        entity_classification_labels = None
        entity_pair_classification_labels = None
        entity_span_classification_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
            entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
            entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels)
            entity_pair_classification_labels = ids_tensor(
                [self.batch_size], self.num_entity_pair_classification_labels
            )
            entity_span_classification_labels = ids_tensor(
                [self.batch_size, self.entity_length], self.num_entity_span_classification_labels
            )

        config = self.get_config()

        return (
            config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
            entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
            entity_labels, entity_classification_labels, entity_pair_classification_labels,
            entity_span_classification_labels,
        )

    def get_config(self):
        return LukeConfig(
            vocab_size=self.vocab_size,
            entity_vocab_size=self.entity_vocab_size,
            entity_emb_size=self.entity_emb_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_entity_aware_attention=self.use_entity_aware_attention,
        )

    def create_and_check_model(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        model = LukeModel(config=config)
        model.to(torch_device)
        model.eval()
        # test with words + entities
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(
            result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size)
        )
        # test with words only
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_classification_labels
        model = LukeForMaskedLM(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=token_labels,
            entity_labels=entity_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        if entity_ids is not None:
            self.parent.assertEqual(
                result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size)
            )
        else:
            self.parent.assertIsNone(result.entity_logits)

    def create_and_check_for_entity_classification(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_classification_labels
        model = LukeForEntityClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))

    def create_and_check_for_entity_pair_classification(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_pair_classification_labels
        model = LukeForEntityPairClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_pair_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))

    def create_and_check_for_entity_span_classification(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_span_classification_labels
        model = LukeForEntitySpanClassification(config)
        model.to(torch_device)
        model.eval()

        entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
        entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)

        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            entity_start_positions=entity_start_positions,
            entity_end_positions=entity_end_positions,
            labels=entity_span_classification_labels,
        )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels)
        )

    def create_and_check_for_question_answering(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        model = LukeForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_labels
        model = LukeForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_labels
        model = LukeForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
        entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
        entity_labels, entity_classification_labels, entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_choices = self.num_choices
        model = LukeForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_attention_mask = attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_entity_ids = entity_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_entity_token_type_ids = (
            entity_token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        )
        multiple_choice_entity_attention_mask = (
            entity_attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        )
        multiple_choice_entity_position_ids = (
            entity_position_ids.unsqueeze(1).expand(-1, self.num_choices, -1, -1).contiguous()
        )
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_attention_mask,
            token_type_ids=multiple_choice_token_type_ids,
            entity_ids=multiple_choice_entity_ids,
            entity_attention_mask=multiple_choice_entity_attention_mask,
            entity_token_type_ids=multiple_choice_entity_token_type_ids,
            entity_position_ids=multiple_choice_entity_position_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask,
            entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels,
            entity_labels, entity_classification_labels, entity_pair_classification_labels,
entityspanclassificationlabels configandinputs inputsdict inputids inputids tokentypeids tokentypeids attentionmask attentionmask entityids entityids entitytokentypeids entitytokentypeids entityattentionmask entityattentionmask entitypositionids entitypositionids return config inputsdict requiretorch class lukemodeltestmodeltestermixin pipelinetestermixin unittest testcase allmodelclasses lukemodel lukeformaskedlm lukeforentityclassification lukeforentitypairclassification lukeforentityspanclassification lukeforquestionanswering lukeforsequenceclassification lukefortokenclassification lukeformultiplechoice if istorchavailable else pipelinemodelmapping featureextraction lukemodel fillmask lukeformaskedlm questionanswering lukeforquestionanswering textclassification lukeforsequenceclassification tokenclassification lukefortokenclassification zeroshot lukeforsequenceclassification if istorchavailable else testpruning false testtorchscript false testresizeembeddings true testheadmasking true todo fix the failed tests def ispipelinetesttoskip self pipelinetestcassename configclass modelarchitecture tokenizername processorname if pipelinetestcassename in qapipelinetests zeroshotclassificationpipelinetests return true return false def prepareforclassself inputsdict modelclass returnlabelsfalse entityinputsdict k v for k v in inputsdict items if k startswithentity inputsdict k v for k v in inputsdict items if not k startswithentity inputsdict super prepareforclassinputsdict modelclass returnlabelsreturnlabels if modelclass lukeformultiplechoice entityinputsdict k v unsqueeze1 expand1 self modeltester numchoices 1 contiguous if v ndim 2 else v unsqueeze1 expand1 self modeltester numchoices 1 1 contiguous for k v in entityinputsdict items inputsdict updateentityinputsdict if modelclass lukeforentityspanclassification inputsdictentitystartpositions torch zeros self modeltester batchsize self modeltester entitylength dtypetorch long devicetorchdevice inputsdictentityendpositions torch ones self modeltester batchsize self modeltester entitylength dtypetorch long devicetorchdevice if returnlabels if modelclass in lukeforentityclassification lukeforentitypairclassification lukeforsequenceclassification lukeformultiplechoice inputsdictlabels torch zeros self modeltester batchsize dtypetorch long devicetorchdevice elif modelclass lukeforentityspanclassification inputsdictlabels torch zeros self modeltester batchsize self modeltester entitylength dtypetorch long devicetorchdevice elif modelclass lukefortokenclassification inputsdictlabels torch zeros self modeltester batchsize self modeltester seqlength dtypetorch long devicetorchdevice elif modelclass lukeformaskedlm inputsdictlabels torch zeros self modeltester batchsize self modeltester seqlength dtypetorch long devicetorchdevice inputsdictentitylabels torch zeros self modeltester batchsize self modeltester entitylength dtypetorch long devicetorchdevice return inputsdict def setupself self modeltester lukemodeltesterself self configtester configtesterself configclasslukeconfig hiddensize37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs slow def testmodelfrompretrainedself for modelname in lukepretrainedmodelarchivelist model lukemodel frompretrainedmodelname self assertisnotnonemodel def testformaskedlmself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckformaskedlmconfigandinputs def 
testformaskedlmwithwordonlyself configandinputs self modeltester prepareconfigandinputs configandinputs configandinputs 4 none lenconfigandinputs4 self modeltester createandcheckformaskedlmconfigandinputs def testforquestionansweringself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforquestionansweringconfigandinputs def testforsequenceclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforsequenceclassificationconfigandinputs def testfortokenclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckfortokenclassificationconfigandinputs def testformultiplechoiceself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckformultiplechoiceconfigandinputs def testforentityclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforentityclassificationconfigandinputs def testforentitypairclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforentitypairclassificationconfigandinputs def testforentityspanclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforentityspanclassificationconfigandinputs def testattentionoutputsself config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true seqlength self modeltester seqlength entitylength self modeltester entitylength keylength seqlength entitylength for modelclass in self allmodelclasses inputsdictoutputattentions true inputsdictoutputhiddenstates false config returndict true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs attentions self assertequallenattentions self modeltester numhiddenlayers check that outputattentions also work using config del inputsdictoutputattentions config outputattentions true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs attentions self assertequallenattentions self modeltester numhiddenlayers self assertlistequal listattentions0 shape3 self modeltester numattentionheads seqlength entitylength keylength outlen lenoutputs check attention is always last and order is fine inputsdictoutputattentions true inputsdictoutputhiddenstates true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass addedhiddenstates self modeltester numhiddenstatestypes self assertequaloutlen addedhiddenstates lenoutputs selfattentions outputs attentions self assertequallenselfattentions self modeltester numhiddenlayers self assertlistequal listselfattentions0 shape3 self modeltester numattentionheads seqlength entitylength keylength def testentityhiddenstatesoutputself def checkhiddenstatesoutputinputsdict config modelclass model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass entityhiddenstates outputs entityhiddenstates expectednumlayers getattr self modeltester expectednumhiddenlayers self modeltester numhiddenlayers 1 self assertequallenentityhiddenstates expectednumlayers entitylength self modeltester entitylength self assertlistequal listentityhiddenstates0 shape2 entitylength self modeltester hiddensize config inputsdict self modeltester 
prepareconfigandinputsforcommon for modelclass in self allmodelclasses inputsdictoutputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass check that outputhiddenstates also work using config del inputsdictoutputhiddenstates config outputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass def testretaingradentityhiddenstatesself config inputsdict self modeltester prepareconfigandinputsforcommon config outputhiddenstates true config outputattentions true no need to test all models as different heads yield the same functionality modelclass self allmodelclasses0 model modelclassconfig model totorchdevice inputs self prepareforclassinputsdict modelclass outputs modelinputs output outputs0 entityhiddenstates outputs entityhiddenstates0 entityhiddenstates retaingrad output flatten0 backwardretaingraphtrue self assertisnotnoneentityhiddenstates grad unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantfalseself pass requiretorch class lukemodelintegrationtestsunittest testcase slow def testinferencebasemodelself model lukemodel frompretrainedstudioousialukebase eval model totorchdevice tokenizer luketokenizer frompretrainedstudioousialukebase taskentityclassification text top seed ana ivanovic said on thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second round exit at wimbledon span 39 42 encoding tokenizertext entityspansspan addprefixspacetrue returntensorspt move all values to device for key value in encoding items encodingkey encodingkey totorchdevice outputs modelencoding verify word hidden states expectedshape torch size1 42 768 self assertequaloutputs lasthiddenstate shape expectedshape expectedslice torch tensor 0 0037 0 1368 0 0091 0 1099 0 3329 0 1095 0 0765 0 5335 0 1179 totorchdevice self asserttruetorch allcloseoutputs lasthiddenstate0 3 3 expectedslice atol1e4 verify entity hidden states expectedshape torch size1 1 768 self assertequaloutputs entitylasthiddenstate shape expectedshape expectedslice torch tensor0 1457 0 1044 0 0174 totorchdevice self asserttruetorch allcloseoutputs entitylasthiddenstate0 3 3 expectedslice atol1e4 slow def testinferencelargemodelself model lukemodel frompretrainedstudioousialukelarge eval model totorchdevice tokenizer luketokenizer frompretrainedstudioousialukelarge taskentityclassification text top seed ana ivanovic said on thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second round exit at wimbledon span 39 42 encoding tokenizertext entityspansspan addprefixspacetrue returntensorspt move all values to device for key value in encoding items encodingkey encodingkey totorchdevice outputs modelencoding verify word hidden states expectedshape torch size1 42 1024 self assertequaloutputs lasthiddenstate shape expectedshape expectedslice torch tensor 0 0133 0 0865 0 0095 0 3093 0 2576 0 7418 0 1720 0 2117 0 2869 totorchdevice self asserttruetorch 
allcloseoutputs lasthiddenstate0 3 3 expectedslice atol1e4 verify entity hidden states expectedshape torch size1 1 1024 self assertequaloutputs entitylasthiddenstate shape expectedshape expectedslice torch tensor0 0466 0 0106 0 0179 totorchdevice self asserttruetorch allcloseoutputs entitylasthiddenstate0 3 3 expectedslice atol1e4 coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch luke model hidden_states and entity_hidden_states prepare words prepare entities test with words entities test with words only todo fix the failed tests check that output_attentions also work using config check attention is always last and order is fine check that output_hidden_states also work using config no need to test all models as different heads yield the same functionality move all values to device verify word hidden states verify entity hidden states move all values to device verify word hidden states verify entity hidden states
import unittest from transformers import LukeConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukeTokenizer, ) from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST class LukeModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, entity_length=3, mention_length=5, use_attention_mask=True, use_token_type_ids=True, use_entity_ids=True, use_entity_attention_mask=True, use_entity_token_type_ids=True, use_entity_position_ids=True, use_labels=True, vocab_size=99, entity_vocab_size=10, entity_emb_size=6, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, num_entity_classification_labels=9, num_entity_pair_classification_labels=6, num_entity_span_classification_labels=4, use_entity_aware_attention=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.entity_length = entity_length self.mention_length = mention_length self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_entity_ids = use_entity_ids self.use_entity_attention_mask = use_entity_attention_mask self.use_entity_token_type_ids = use_entity_token_type_ids self.use_entity_position_ids = use_entity_position_ids self.use_labels = use_labels self.vocab_size = vocab_size self.entity_vocab_size = entity_vocab_size self.entity_emb_size = entity_emb_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.num_entity_classification_labels = num_entity_classification_labels self.num_entity_pair_classification_labels = num_entity_pair_classification_labels self.num_entity_span_classification_labels = num_entity_span_classification_labels self.scope = scope self.use_entity_aware_attention = use_entity_aware_attention self.encoder_seq_length = seq_length self.key_length = seq_length self.num_hidden_states_types = 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) entity_ids = 
ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size) entity_attention_mask = None if self.use_entity_attention_mask: entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length]) entity_token_type_ids = None if self.use_token_type_ids: entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size) entity_position_ids = None if self.use_entity_position_ids: entity_position_ids = ids_tensor( [self.batch_size, self.entity_length, self.mention_length], self.mention_length ) sequence_labels = None token_labels = None choice_labels = None entity_labels = None entity_classification_labels = None entity_pair_classification_labels = None entity_span_classification_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size) entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels) entity_pair_classification_labels = ids_tensor( [self.batch_size], self.num_entity_pair_classification_labels ) entity_span_classification_labels = ids_tensor( [self.batch_size, self.entity_length], self.num_entity_span_classification_labels ) config = self.get_config() return ( config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ) def get_config(self): return LukeConfig( vocab_size=self.vocab_size, entity_vocab_size=self.entity_vocab_size, entity_emb_size=self.entity_emb_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_entity_aware_attention=self.use_entity_aware_attention, ) def create_and_check_model( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): model = LukeModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size) ) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, 
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        token_labels,
        choice_labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_classification_labels
        model = LukeForMaskedLM(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=token_labels,
            entity_labels=entity_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        if entity_ids is not None:
            self.parent.assertEqual(
                result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size)
            )
        else:
            self.parent.assertIsNone(result.entity_logits)

    def create_and_check_for_entity_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        token_labels,
        choice_labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_classification_labels
        model = LukeForEntityClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))

    def create_and_check_for_entity_pair_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        token_labels,
        choice_labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_pair_classification_labels
        model = LukeForEntityPairClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_pair_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))

    def create_and_check_for_entity_span_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        token_labels,
        choice_labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        config.num_labels = self.num_entity_span_classification_labels
        model = LukeForEntitySpanClassification(config)
        model.to(torch_device)
        model.eval()

        entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
        entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)

        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, entity_start_positions=entity_start_positions, entity_end_positions=entity_end_positions, labels=entity_span_classification_labels, ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels) ) def create_and_check_for_question_answering( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): model = LukeForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_labels model = LukeForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_labels model = LukeForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_choices = self.num_choices model = LukeForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = 
token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_attention_mask = attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_entity_ids = entity_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_entity_token_type_ids = ( entity_token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_entity_attention_mask = ( entity_attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_entity_position_ids = ( entity_position_ids.unsqueeze(1).expand(-1, self.num_choices, -1, -1).contiguous() ) result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_attention_mask, token_type_ids=multiple_choice_token_type_ids, entity_ids=multiple_choice_entity_ids, entity_attention_mask=multiple_choice_entity_attention_mask, entity_token_type_ids=multiple_choice_entity_token_type_ids, entity_position_ids=multiple_choice_entity_position_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "entity_ids": entity_ids, "entity_token_type_ids": entity_token_type_ids, "entity_attention_mask": entity_attention_mask, "entity_position_ids": entity_position_ids, } return config, inputs_dict @require_torch class LukeModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LukeModel, LukeForMaskedLM, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LukeModel, "fill-mask": LukeForMaskedLM, "question-answering": LukeForQuestionAnswering, "text-classification": LukeForSequenceClassification, "token-classification": LukeForTokenClassification, "zero-shot": LukeForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = True test_head_masking = True def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name in ["QAPipelineTests", "ZeroShotClassificationPipelineTests"]: return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): entity_inputs_dict = {k: v for k, v in inputs_dict.items() if k.startswith("entity")} inputs_dict = {k: v for k, v in inputs_dict.items() if not k.startswith("entity")} inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if model_class == LukeForMultipleChoice: entity_inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if v.ndim == 2 else v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1, -1).contiguous() for k, v in entity_inputs_dict.items() } inputs_dict.update(entity_inputs_dict) 
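        # LukeForEntitySpanClassification additionally expects start/end token positions for every
        # entity, so deterministic dummy positions are supplied below for the common tests.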
if model_class == LukeForEntitySpanClassification: inputs_dict["entity_start_positions"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device ) inputs_dict["entity_end_positions"] = torch.ones( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device ) if return_labels: if model_class in ( LukeForEntityClassification, LukeForEntityPairClassification, LukeForSequenceClassification, LukeForMultipleChoice, ): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == LukeForEntitySpanClassification: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device, ) elif model_class == LukeForTokenClassification: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) elif model_class == LukeForMaskedLM: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["entity_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = LukeModelTester(self) self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in LUKE_PRETRAINED_MODEL_ARCHIVE_LIST: model = LukeModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_masked_lm_with_word_only(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = (*config_and_inputs[:4], *((None,) * len(config_and_inputs[4:]))) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_entity_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_entity_classification(*config_and_inputs) def test_for_entity_pair_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_entity_pair_classification(*config_and_inputs) def test_for_entity_span_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
        self.model_tester.create_and_check_for_entity_span_classification(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_length = self.model_tester.seq_length
        entity_length = self.model_tester.entity_length
        key_length = seq_length + entity_length

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = self.model_tester.num_hidden_states_types
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
            )

    def test_entity_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            entity_hidden_states = outputs.entity_hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(entity_hidden_states), expected_num_layers)

            entity_length = self.model_tester.entity_length

            self.assertListEqual(
                list(entity_hidden_states[0].shape[-2:]),
                [entity_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_entity_hidden_states(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        entity_hidden_states = outputs.entity_hidden_states[0]
        entity_hidden_states.retain_grad()
        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(entity_hidden_states.grad)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass


@require_torch
class LukeModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_base_model(self):
        model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
        model.to(torch_device)

        tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
        text = (
            "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
            " the new world number one avoid a humiliating second- round exit at Wimbledon ."
        )
        span = (39, 42)
        encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

        # move all values to device
        for key, value in encoding.items():
            encoding[key] = encoding[key].to(torch_device)

        outputs = model(**encoding)

        # verify word hidden states
        expected_shape = torch.Size((1, 42, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

        # verify entity hidden states
        expected_shape = torch.Size((1, 1, 768))
        self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_large_model(self):
        model = LukeModel.from_pretrained("studio-ousia/luke-large").eval()
        model.to(torch_device)

        tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification")
        text = (
            "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
            " the new world number one avoid a humiliating second- round exit at Wimbledon ."
        )
        span = (39, 42)
        encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

        # move all values to device
        for key, value in encoding.items():
            encoding[key] = encoding[key].to(torch_device)

        outputs = model(**encoding)

        # verify word hidden states
        expected_shape = torch.Size((1, 42, 1024))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

        # verify entity hidden states
        expected_shape = torch.Size((1, 1, 1024))
        self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
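For reference, the entity tensors that the tests above build by hand (entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids) are normally produced by LukeTokenizer from character-level entity spans. A minimal sketch of that flow, assuming the same studio-ousia/luke-base checkpoint used in the integration tests; the example sentence and spans are illustrative:

import torch

from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()

text = "Beyoncé lives in Los Angeles."
# Character-level (start, end) spans of "Beyoncé" and "Los Angeles"; the tokenizer
# expands these into entity_ids, entity_attention_mask, and entity_position_ids.
entity_spans = [(0, 7), (17, 28)]

inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# LUKE returns word and entity representations separately, which is what the
# shape assertions in the tests above check.
print(outputs.last_hidden_state.shape)  # torch.Size([1, sequence_length, hidden_size])
print(outputs.entity_last_hidden_state.shape)  # torch.Size([1, 2, hidden_size])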
# coding=utf-8
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from typing import Tuple from transformers import AddedToken, LukeTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json") SAMPLE_MERGE_FILE = get_tests_dir("fixtures/merges.txt") SAMPLE_ENTITY_VOCAB = get_tests_dir("fixtures/test_entity_vocab.json") class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LukeTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() self.special_tokens_map = {"entity_token_1": "<ent>", "entity_token_2": "<ent2>"} def get_tokenizer(self, task=None, **kwargs): kwargs.update(self.special_tokens_map) tokenizer = LukeTokenizer( vocab_file=SAMPLE_VOCAB, merges_file=SAMPLE_MERGE_FILE, entity_vocab_file=SAMPLE_ENTITY_VOCAB, task=task, **kwargs, ) return tokenizer def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.get_tokenizer() text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "Ġ", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("studio-ousia/luke-large") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertEqual(encoded_sentence, encoded_text_from_decode) self.assertEqual(encoded_pair, encoded_pair_from_decode) def get_clean_sequence(self, tokenizer, max_length=20) -> Tuple[str, list]: txt = "Beyonce lives in Los Angeles" ids = tokenizer.encode(txt, add_special_tokens=False) return txt, ids def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." 
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest("{} ({})".format(tokenizer.__class__.__name__, pretrained_name)):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_padding_entity_inputs(self):
        tokenizer = self.get_tokenizer()

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        span = (15, 34)
        pad_id = tokenizer.entity_vocab["[PAD]"]
        mask_id = tokenizer.entity_vocab["[MASK]"]

        encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True)
        self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]])

        # test with a sentence with no entity
        encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True)
        self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]])

    def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self):
        tokenizer = self.get_tokenizer()

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
spans = [(15, 34)] entities = ["East Asian language"] with self.assertRaises(ValueError): tokenizer(sentence, entities=tuple(entities), entity_spans=spans) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=tuple(spans)) with self.assertRaises(ValueError): tokenizer(sentence, entities=[0], entity_spans=spans) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=[0]) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=spans + [(0, 9)]) def test_if_tokenize_entity_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." span = (15, 34) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[span, span]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0]) def test_if_tokenize_entity_pair_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_pair_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0]) def test_if_tokenize_entity_span_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_span_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0, 0]) @slow @require_torch class LukeTokenizerIntegrationTests(unittest.TestCase): tokenizer_class = LukeTokenizer from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() def test_single_text_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) def test_single_text_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ] ] ) def test_single_text_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer( sentence, entities=entities, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_text_pair_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) def test_text_pair_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." 
spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) def test_text_pair_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_entity_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification") sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." 
) span = (39, 42) encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True) self.assertEqual(len(encoding["input_ids"]), 42) self.assertEqual(len(encoding["attention_mask"]), 42) self.assertEqual(len(encoding["token_type_ids"]), 42) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday<ent> she<ent> could hardly believe her luck as a fortuitous" " netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][9:12], spaces_between_special_tokens=False), "<ent> she<ent>" ) self.assertEqual(encoding["entity_ids"], [2]) self.assertEqual(encoding["entity_attention_mask"], [1]) self.assertEqual(encoding["entity_token_type_ids"], [0]) self.assertEqual( encoding["entity_position_ids"], [ [9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] ] ) def test_entity_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_classification", return_token_type_ids=True ) sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." ) span = (39, 42) encoding = tokenizer( sentence, entity_spans=[span], return_token_type_ids=True, padding="max_length", return_tensors="pt" ) self.assertEqual(encoding["input_ids"].shape, (1, 512)) self.assertEqual(encoding["attention_mask"].shape, (1, 512)) self.assertEqual(encoding["token_type_ids"].shape, (1, 512)) self.assertEqual(encoding["entity_ids"].shape, (1, 1)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 1)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 1)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_pair_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
spans = [(9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed<ent> Ana Ivanovic<ent> said on Thursday<ent2> she<ent2> could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:8], spaces_between_special_tokens=False), "<ent> Ana Ivanovic<ent>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][11:14], spaces_between_special_tokens=False), "<ent2> she<ent2>" ) self.assertEqual(encoding["entity_ids"], [2, 3]) self.assertEqual(encoding["entity_attention_mask"], [1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, 12, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) def test_entity_pair_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, return_tensors="pt", ) self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) self.assertEqual(encoding["entity_ids"].shape, (1, 2)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 2)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 2)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_span_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual(encoding["entity_ids"], [2, 2, 2]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) self.assertEqual( encoding["entity_position_ids"], [ [1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) self.assertEqual(encoding["entity_start_positions"], [1, 3, 9]) self.assertEqual(encoding["entity_end_positions"], [2, 5, 9]) def test_entity_span_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) self.assertEqual(encoding["entity_start_positions"].shape, (1, 16)) self.assertEqual(encoding["entity_end_positions"].shape, (1, 16))
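# A minimal sketch (editor's addition, not part of the original test suite):
# every integration test above asserts the same padding rule for
# "entity_position_ids" -- each entity's word-token indices are right-padded
# with -1 up to tokenizer.max_mention_length (30 for studio-ousia/luke-base).
# The helper name below is hypothetical.


def _pad_entity_positions(token_indices, max_mention_length=30):
    """Right-pad one entity's token indices with -1, as the assertions above expect."""
    clipped = list(token_indices)[:max_mention_length]
    return clipped + [-1] * (max_mention_length - len(clipped))


# e.g. _pad_entity_positions([3, 4, 5]) yields [3, 4, 5, -1, ..., -1] (length 30),
# the first row asserted in test_single_text_no_padding_or_truncation.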
# coding=utf-8
# Copyright 2018 LXMERT Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy import unittest import numpy as np from transformers import LxmertConfig, is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, ) from transformers.models.lxmert.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST if is_tf_available(): import tensorflow as tf class LxmertModelTester: def __init__( self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=4, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None, ): self.parent = parent self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pad_token_id = pad_token_id self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.seq_length = seq_length self.batch_size = batch_size self.is_training = is_training self.use_lang_mask = use_lang_mask self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_visual_features = num_visual_features self.use_token_type_ids = use_token_type_ids self.output_attentions = output_attentions self.output_hidden_states = output_hidden_states self.scope = scope self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} def prepare_config_and_inputs(self): output_attentions = self.output_attentions input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size) visual_feats = torch.rand(self.batch_size, self.num_visual_features, self.visual_feat_dim, device=torch_device) bounding_boxes = torch.rand(self.batch_size, self.num_visual_features, 4, device=torch_device) input_mask = None if self.use_lang_mask: input_mask = 
ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) obj_labels = None if self.task_obj_predict: obj_labels = {} if self.visual_attr_loss and self.task_obj_predict: obj_labels["attr"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ) if self.visual_feat_loss and self.task_obj_predict: obj_labels["feat"] = ( ids_tensor( [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features ), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features), ) if self.visual_obj_loss and self.task_obj_predict: obj_labels["obj"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ) ans = None if self.task_qa: ans = ids_tensor([self.batch_size], self.num_qa_labels) masked_lm_labels = None if self.task_mask_lm: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) matched_label = None if self.task_matched: matched_label = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) def get_config(self): return LxmertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states, ) def create_and_check_lxmert_model( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=not output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, return_dict=False) result = model(input_ids, visual_feats, bounding_boxes, return_dict=True) self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size)) 
self.parent.assertEqual( result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size) ) self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_lxmert_for_question_answering( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, labels=ans) result = model( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.question_answering_score.shape, (self.batch_size, self.num_qa_labels)) def create_and_check_lxmert_for_pretraining( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=not output_attentions, return_dict=False, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def resize_lxmert_num_qa_labels( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): start_labels = config.num_qa_labels num_large_labels = config.num_qa_labels * 2 num_small_labels = int(config.num_qa_labels * 2) less_labels_ans = ids_tensor([self.batch_size], num_small_labels) more_labels_ans = ids_tensor([self.batch_size], num_large_labels) model_pretrain = LxmertForPreTraining(config=config).to(torch_device) model_qa = LxmertForQuestionAnswering(config=config).to(torch_device) config.num_labels = num_small_labels end_labels = config.num_labels result_pretrain = model_pretrain( input_ids, 
visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result_qa = model_qa( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_small_labels) model_qa.resize_num_qa_labels(num_small_labels) result_pretrain_less = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=less_labels_ans, ) result_qa_less = model_qa( input_ids, visual_feats, bounding_boxes, labels=less_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_large_labels) model_qa.resize_num_qa_labels(num_large_labels) result_pretrain_more = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=more_labels_ans, ) result_qa_more = model_qa( input_ids, visual_feats, bounding_boxes, labels=more_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_qa_labels = model_qa.num_qa_labels self.parent.assertNotEqual(start_labels, end_labels) self.parent.assertNotEqual(model_qa_labels, start_labels) self.parent.assertEqual(result_qa.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_pretrain.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_qa_less.question_answering_score.shape, (self.batch_size, num_small_labels)) self.parent.assertEqual( result_pretrain_less.question_answering_score.shape, (self.batch_size, num_small_labels) ) self.parent.assertEqual(result_qa_more.question_answering_score.shape, (self.batch_size, num_large_labels)) self.parent.assertEqual( result_pretrain_more.question_answering_score.shape, (self.batch_size, num_large_labels) ) def prepare_config_and_inputs_for_common(self, return_obj_labels=False): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": bounding_boxes, "token_type_ids": token_type_ids, "attention_mask": input_mask, } if return_obj_labels: inputs_dict["obj_labels"] = obj_labels else: config.task_obj_predict = False return config, inputs_dict @require_torch class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": LxmertModel, "question-answering": LxmertForQuestionAnswering} if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_torchscript = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = LxmertModelTester(self) self.config_tester = ConfigTester(self, 
config_class=LxmertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) def test_lxmert_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_question_answering(*config_and_inputs) def test_lxmert_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs) def test_lxmert_question_answering_labels_resize(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LxmertModel.from_pretrained(model_name) model.to(torch_device) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) 
model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            # 2 hidden states were added
            self.assertEqual(out_len + 2, len(outputs))

            language_attentions, vision_attentions, cross_encoder_attentions = (
                outputs[-3],
                outputs[-2],
                outputs[-1],
            )
            self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
            self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
            self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])

            attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
            attention_shapes = [
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_visual_features,
                    self.model_tester.num_visual_features,
                ],
                [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
            ]

            for attention, attention_shape in zip(attentions, attention_shapes):
                self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1]

            self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1)
            self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1)

            seq_length = self.model_tester.seq_length
            num_visual_features = self.model_tester.num_visual_features

            self.assertListEqual(
                list(language_hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
            self.assertListEqual(
                list(vision_hidden_states[0].shape[-2:]),
                [num_visual_features, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        hidden_states_lang = outputs.language_hidden_states[0]
        attentions_lang = outputs.language_attentions[0]

        hidden_states_vision = outputs.vision_hidden_states[0]
        attentions_vision = outputs.vision_attentions[0]

        hidden_states_lang.retain_grad()
        attentions_lang.retain_grad()
        hidden_states_vision.retain_grad()
        attentions_vision.retain_grad()

        outputs.language_output.flatten()[0].backward(retain_graph=True)
        outputs.vision_output.flatten()[0].backward(retain_graph=True)

        # all four retained tensors must have received gradients
        self.assertIsNotNone(hidden_states_lang.grad)
        self.assertIsNotNone(attentions_lang.grad)
        self.assertIsNotNone(hidden_states_vision.grad)
        self.assertIsNotNone(attentions_vision.grad)

    def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
        tf_inputs_dict = {}
        for key, value in pt_inputs_dict.items():
            # skip key that does not exist in tf
            if isinstance(value, dict):
                # recurse into nested dicts with the same PT -> TF conversion
                tf_inputs_dict[key] = self.prepare_tf_inputs_from_pt_inputs(value)
            elif isinstance(value, (list, tuple)):
                # materialize a tuple rather than leaving a lazy generator behind
                tf_inputs_dict[key] = tuple(
                    self.prepare_tf_inputs_from_pt_inputs(iter_value) for iter_value in value
                )
            elif isinstance(value, bool):
                tf_inputs_dict[key] = value
            elif key == "input_values":
                tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
            elif key == "pixel_values":
                tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
            elif key == "input_features":
                tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
            # other general float inputs
            elif value.is_floating_point():
                tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
            else:
                tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.int32)

        return tf_inputs_dict


@require_torch
class LxmertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = LxmertModel.from_pretrained(LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        input_ids = torch.tensor([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])
        num_visual_features = 10
        # note: the original re-seeds before each draw; keep that so the expected
        # slice below stays reproducible
        np.random.seed(0)
        visual_feats = np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
        np.random.seed(0)
        visual_pos = np.random.rand(1, num_visual_features, 4)
        visual_feats = torch.as_tensor(visual_feats, dtype=torch.float32)
        visual_pos = torch.as_tensor(visual_pos, dtype=torch.float32)

        output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
        expected_shape = torch.Size([1, 11, 768])
        self.assertEqual(expected_shape, output.shape)

        expected_slice = torch.tensor(
            [[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
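# A minimal sketch (editor's addition, not part of the original test suite):
# prepare_tf_inputs_from_pt_inputs above reduces to one dtype rule per tensor --
# floating-point values become tf.float32, everything else tf.int32. The helper
# name below is hypothetical; it assumes torch and tensorflow are both installed.


def _pt_tensor_to_tf(value):
    """Convert a single torch.Tensor with the same dtype rule as the mixin above."""
    import tensorflow as tf

    np_value = value.cpu().numpy()
    if value.is_floating_point():
        return tf.convert_to_tensor(np_value, dtype=tf.float32)
    return tf.convert_to_tensor(np_value, dtype=tf.int32)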
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations import tempfile import unittest import numpy as np from transformers import LxmertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel class TFLxmertModelTester(object): def __init__( self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=8, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None, ): self.parent = parent self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pad_token_id = pad_token_id self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.seq_length = seq_length self.batch_size = batch_size self.is_training = is_training self.use_lang_mask = use_lang_mask self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_visual_features = num_visual_features self.use_token_type_ids = use_token_type_ids self.output_attentions = output_attentions self.output_hidden_states = output_hidden_states self.scope = scope self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} def prepare_config_and_inputs(self): output_attentions = self.output_attentions input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size) visual_feats = tf.random.uniform((self.batch_size, self.num_visual_features, self.visual_feat_dim)) bounding_boxes = tf.random.uniform((self.batch_size, self.num_visual_features, 4)) input_mask = None if self.use_lang_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) obj_labels = None if 
self.task_obj_predict: obj_labels = {} if self.visual_attr_loss and self.task_obj_predict: obj_labels["attr"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ) if self.visual_feat_loss and self.task_obj_predict: obj_labels["feat"] = ( ids_tensor( [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features ), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features), ) if self.visual_obj_loss and self.task_obj_predict: obj_labels["obj"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ) ans = None if self.task_qa: ans = ids_tensor([self.batch_size], self.num_qa_labels) masked_lm_labels = None if self.task_mask_lm: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) matched_label = None if self.task_matched: matched_label = ids_tensor([self.batch_size], self.num_labels) config = LxmertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states, ) return ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) def create_and_check_lxmert_model( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = TFLxmertModel(config=config) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=not output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, return_dict=False) result = model(input_ids, visual_feats, bounding_boxes, return_dict=True) self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size) ) self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self, return_obj_labels=False): config_and_inputs = 
self.prepare_config_and_inputs() ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": bounding_boxes, "token_type_ids": token_type_ids, "attention_mask": input_mask, } if return_obj_labels: inputs_dict["obj_labels"] = obj_labels else: config.task_obj_predict = False return config, inputs_dict def create_and_check_lxmert_for_pretraining( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = TFLxmertForPreTraining(config=config) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=not output_attentions, return_dict=False, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) @require_tf class TFLxmertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFLxmertModel, TFLxmertForPreTraining) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFLxmertModel} if is_tf_available() else {} test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFLxmertModelTester(self) self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) def test_lxmert_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ["unc-nlp/lxmert-base-uncased"]: model = TFLxmertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() encoder_seq_length = ( self.model_tester.encoder_seq_length if hasattr(self.model_tester, "encoder_seq_length") else self.model_tester.seq_length ) encoder_key_length = ( self.model_tester.key_length if hasattr(self.model_tester, "key_length") else 
encoder_seq_length ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(model.config.output_hidden_states, False) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1] self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1) self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1) seq_length = self.model_tester.seq_length num_visual_features = self.model_tester.num_visual_features self.assertListEqual( list(language_hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) self.assertListEqual( list(vision_hidden_states[0].shape[-2:]), [num_visual_features, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, 
inputs_dict, model_class)

    def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
        import torch

        pt_inputs_dict = {}
        for key, value in tf_inputs_dict.items():
            if isinstance(value, dict):
                pt_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value)
            elif isinstance(value, (list, tuple)):
                # materialize a tuple rather than leaving a lazy generator behind
                pt_inputs_dict[key] = tuple(
                    self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value
                )
            # booleans pass through unchanged (the check is on the value, not the key)
            elif isinstance(value, bool):
                pt_inputs_dict[key] = value
            elif key == "input_values":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            elif key == "pixel_values":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            elif key == "input_features":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            # other general float inputs
            elif tf_inputs_dict[key].dtype.is_floating:
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            else:
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.long)

        return pt_inputs_dict

    def test_save_load(self):
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common(
                return_obj_labels="PreTraining" in model_class.__name__
            )

            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)


@require_tf
class TFLxmertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
        input_ids = tf.constant([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])

        num_visual_features = 10
        # note: the original re-seeds before each draw; keep that so the expected
        # slice below stays reproducible
        np.random.seed(0)
        visual_feats = np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
        np.random.seed(0)
        visual_pos = np.random.rand(1, num_visual_features, 4)
        visual_feats = tf.convert_to_tensor(visual_feats, dtype=tf.float32)
        visual_pos = tf.convert_to_tensor(visual_pos, dtype=tf.float32)

        output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
        expected_shape = [1, 11, 768]
        self.assertEqual(expected_shape, output.shape)

        expected_slice = tf.constant(
            [
                [
                    [0.24170142, -0.98075, 0.14797261],
                    [1.2540525, -0.83198136, 0.5112344],
                    [1.4070463, -1.1051831, 0.6990401],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
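# A minimal sketch (editor's addition, not part of the original test suite):
# test_save_load above follows the standard round-trip pattern -- run the model,
# save_pretrained, from_pretrained, run again, compare outputs. The helper name
# is hypothetical and assumes any model class exposing those two methods.


def _save_load_round_trip(model_class, config, inputs, tmpdir):
    """Return (before, after) outputs for a serialize/reload round trip."""
    model = model_class(config)
    before = model(inputs)
    model.save_pretrained(tmpdir)
    reloaded = model_class.from_pretrained(tmpdir)
    after = reloaded(inputs)
    return before, after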
# coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch M2M100 model. """


import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(
            config.decoder_layers, config.decoder_attention_heads, device=torch_device
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad tokens in between.
        # For M2M100 the position_ids are prepared such that all pad tokens get
        # pos id = 2 and the rest lie in 2..seq_length, where seq_length here is
        # seq_length - num_pad_tokens. When using past there is no way of knowing
        # whether the past input ids contained pad tokens, which would leave the
        # position_ids off by num_pad_tokens for the past input.
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]]],
            device=torch_device,
        )
self asserttruetorch allcloseoutput 3 3 expectedslice atoltolerance def testinferenceheadself model m2m100forconditionalgeneration frompretrainedfacebookm2m100418m totorchdevice change to intended input inputids longtensor128028 98 12 30527 2732 159 7755 61904 39144 38 2 decoderinputids longtensor2 128028 98 12 30527 2732 159 7755 61904 39144 38 inputsdict preparem2m100inputsdictmodel config inputids decoderinputids with torch nograd output modelinputsdict0 expectedshape torch size1 11 model config vocabsize self assertequaloutput shape expectedshape change to expected output here expectedslice torch tensor 1 0448 1 0411 3 7992 3 2191 3 2386 1 3451 3 6210 3 5993 0 4925 devicetorchdevice self asserttruetorch allcloseoutput 3 3 expectedslice atoltolerance def testseqtoseqgenerationself model m2m100forconditionalgeneration frompretrainedfacebookm2m100418m totorchdevice tokenizer m2m100tokenizer frompretrainedfacebookm2m100418m srclangfr tgtlangen srcfr l affaire nsa souligne l absence totale de dbat sur le renseignement selon moi il y a deux niveaux de rponse de la part du gouvernement franais lorsque franois hollande tlphone barack obama ou quand le ministre des affaires trangres laurent fabius convoque l ambassadeur des etatsunis ils ragissent une vraie dcouverte qui est celle de l ampleur de la surveillance amricaine sur l ensemble des communications en france the below article tests that we don t add any hypotheses outside of the top nbeams dct tokenizersrcfr paddingtrue returntensorspt hypothesesbatch model generate inputidsdctinputids totorchdevice attentionmaskdctattentionmask totorchdevice numbeams5 forcedbostokenidtokenizer getlangiden expecteden the nsa case highlights the total absence of intelligence debate i think there are two levels of response from the french government when franois hollande calls barack obama or when foreign minister laurent fabius calls the u s ambassador they respond to a real discovery which is that of the scale of u s surveillance on all communications in france generated tokenizer batchdecode hypothesesbatch tolist cleanuptokenizationspacestrue skipspecialtokenstrue assert generated expecteden coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch m2m100 model eos token we need to clamp the input ids here to avoid having pad token in between this is because for m2m100 the position_ids are prepared such that all pad tokens have pos id 2 and rest are between 2 seq_length and the seq_length here is seq_length num_pad_tokens but when using past there is no way of knowing if the past input ids had pad tokens in them which results in incorrect seq_lenth and which in turn results in position_ids being off by num_pad_tokens in past input first forward pass create hypothetical multiple next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice todo fix the failed tests get valueerror translation requires a src_lang and a tgt_lang for this model m2m100config was never used in pipeline 
tests cannot create a simple tokenizer change to expected output here change to intended input change to expected output here the below article tests that we don t add any hypotheses outside of the top n_beams
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # We need to clamp the input ids here to avoid having pad tokens in between.
        # For M2M100 the position_ids are prepared such that all pad tokens get
        # pos id = 2 and the rest lie between 2..seq_length, where seq_length here is
        # seq_length - num_pad_tokens. When using past there is no way of knowing
        # whether the past input ids contained pad tokens, which results in an
        # incorrect seq_length and, in turn, in position_ids being off by
        # num_pad_tokens for the past input.
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
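
# The helper below is an illustrative addition, not upstream test code: it restates
# the cached-decoding check from `create_and_check_decoder_model_past_large_inputs`
# as a standalone sketch, to make the `past_key_values` contract easier to see in
# isolation. The helper name is hypothetical; `config` is assumed to be a small
# `M2M100Config` such as the one built by `M2M100ModelTester.get_config`.
def _check_incremental_decoding_equivalence(config):
    decoder = M2M100Model(config).get_decoder().to(torch_device).eval()
    # pad-free ids, for the same position_ids reason explained in prepare_config_and_inputs
    input_ids = ids_tensor([2, 5], config.vocab_size).clamp(config.pad_token_id + 1)
    next_ids = ids_tensor([2, 2], config.vocab_size).clamp(config.pad_token_id + 1)
    with torch.no_grad():
        # one pass over the prefix, caching keys/values ...
        _, past_key_values = decoder(input_ids, use_cache=True).to_tuple()
        # ... then feed only the new tokens together with the cache
        step = decoder(next_ids, past_key_values=past_key_values)["last_hidden_state"]
        # recomputing everything from scratch must match on the new positions
        full = decoder(torch.cat([input_ids, next_ids], dim=-1))["last_hidden_state"][:, -2:]
    return torch.allclose(step, full, atol=1e-2)
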
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
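
# Illustrative addition, not upstream test code: the integration tests below exercise
# fr->en generation through the real "facebook/m2m100_418M" checkpoint. As end-user
# code that flow looks roughly like the sketch here; the helper name and the example
# sentence are ours, and the call downloads the checkpoint, so treat it as
# slow/network-bound.
def _example_translation_usage():
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tokenizer(["La vie est belle."], return_tensors="pt")
    # M2M100 selects the output language by forcing the target-language BOS token
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"), num_beams=5)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
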
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
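
# Illustrative addition: the upstream repository runs this module through pytest
# (e.g. `RUN_SLOW=1 pytest tests/models/m2m_100/test_modeling_m2m_100.py`, where
# RUN_SLOW=1 un-skips the @slow integration tests above; the path is indicative).
# A plain-unittest entry point is sketched here for running the file directly.
if __name__ == "__main__":
    unittest.main()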