# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Bark model. """
import copy
import inspect
import tempfile
import unittest

import pytest

from transformers import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
    is_torch_available,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.testing_utils import (
    require_flash_attn,
    require_torch,
    require_torch_fp16,
    require_torch_gpu,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ..encodec.test_modeling_encodec import EncodecModelTester


if is_torch_available():
    import torch

    from transformers import (
        BarkCausalModel,
        BarkCoarseModel,
        BarkFineModel,
        BarkModel,
        BarkProcessor,
        BarkSemanticModel,
    )


class BarkSemanticModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=4,
        is_training=False,  # for now training is not supported
        use_input_mask=True,
        use_labels=True,
        vocab_size=33,
        output_vocab_size=33,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=15,
        dropout=0.1,
        window_size=256,
        initializer_range=0.02,
        n_codes_total=8,  # for BarkFineModel
        n_codes_given=1,  # for BarkFineModel
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.output_vocab_size = output_vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.window_size = window_size
        self.initializer_range = initializer_range
        self.bos_token_id = output_vocab_size - 1
        self.eos_token_id = output_vocab_size - 1
        self.pad_token_id = output_vocab_size - 1

        self.n_codes_total = n_codes_total
        self.n_codes_given = n_codes_given

        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
            "attention_mask": input_mask,
        }

        return config, inputs_dict

    def get_config(self):
        return BarkSemanticConfig(
            vocab_size=self.vocab_size,
            output_vocab_size=self.output_vocab_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            window_size=self.window_size,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        config.output_vocab_size = 300
        return config

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BarkSemanticModel(config=config).to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "logits"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

        # test no attention_mask works
        outputs = model(input_ids, use_cache=True)
        _, past_key_values = outputs.to_tuple()
        output_from_no_past = model(next_input_ids)["logits"]

        output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]

        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))


class BarkCoarseModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=4,
        is_training=False,  # for now training is not supported
        use_input_mask=True,
        use_labels=True,
        vocab_size=33,
        output_vocab_size=33,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=15,
        dropout=0.1,
        window_size=256,
        initializer_range=0.02,
        n_codes_total=8,  # for BarkFineModel
        n_codes_given=1,  # for BarkFineModel
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.output_vocab_size = output_vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.window_size = window_size
        self.initializer_range = initializer_range
        self.bos_token_id = output_vocab_size - 1
        self.eos_token_id = output_vocab_size - 1
        self.pad_token_id = output_vocab_size - 1

        self.n_codes_total = n_codes_total
        self.n_codes_given = n_codes_given

        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
            "attention_mask": input_mask,
        }

        return config, inputs_dict

    def get_config(self):
        return BarkCoarseConfig(
            vocab_size=self.vocab_size,
            output_vocab_size=self.output_vocab_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            window_size=self.window_size,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        config.output_vocab_size = 300
        return config

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BarkCoarseModel(config=config).to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "logits"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

        # test no attention_mask works
        outputs = model(input_ids, use_cache=True)
        _, past_key_values = outputs.to_tuple()
        output_from_no_past = model(next_input_ids)["logits"]

        output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]

        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))


class BarkFineModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=4,
        is_training=False,  # for now training is not supported
        use_input_mask=True,
        use_labels=True,
        vocab_size=33,
        output_vocab_size=33,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=15,
        dropout=0.1,
        window_size=256,
        initializer_range=0.02,
        n_codes_total=8,  # for BarkFineModel
        n_codes_given=1,  # for BarkFineModel
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.output_vocab_size = output_vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.window_size = window_size
        self.initializer_range = initializer_range
        self.bos_token_id = output_vocab_size - 1
        self.eos_token_id = output_vocab_size - 1
        self.pad_token_id = output_vocab_size - 1

        self.n_codes_total = n_codes_total
        self.n_codes_given = n_codes_given

        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        # randint between self.n_codes_given - 1 and self.n_codes_total - 1
        codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given

        inputs_dict = {
            "codebook_idx": codebook_idx,
            "input_ids": input_ids,
            "head_mask": head_mask,
            "attention_mask": input_mask,
        }

        return config, inputs_dict

    def get_config(self):
        return BarkFineConfig(
            vocab_size=self.vocab_size,
            output_vocab_size=self.output_vocab_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            window_size=self.window_size,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        config.output_vocab_size = 300
        return config

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BarkFineModel(config=config).to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "logits"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

        # test no attention_mask works
        outputs = model(input_ids, use_cache=True)
        _, past_key_values = outputs.to_tuple()
        output_from_no_past = model(next_input_ids)["logits"]

        output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]

        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))


class BarkModelTester:
    def __init__(
        self,
        parent,
        semantic_kwargs=None,
        coarse_acoustics_kwargs=None,
        fine_acoustics_kwargs=None,
        codec_kwargs=None,
        is_training=False,  # for now training is not supported
    ):
        if semantic_kwargs is None:
            semantic_kwargs = {}
        if coarse_acoustics_kwargs is None:
            coarse_acoustics_kwargs = {}
        if fine_acoustics_kwargs is None:
            fine_acoustics_kwargs = {}
        if codec_kwargs is None:
            codec_kwargs = {}

        self.parent = parent
        self.semantic_model_tester = BarkSemanticModelTester(parent, **semantic_kwargs)
        self.coarse_acoustics_model_tester = BarkCoarseModelTester(parent, **coarse_acoustics_kwargs)
        self.fine_acoustics_model_tester = BarkFineModelTester(parent, **fine_acoustics_kwargs)
        self.codec_model_tester = EncodecModelTester(parent, **codec_kwargs)

        self.is_training = is_training

    def get_config(self):
        return BarkConfig.from_sub_model_configs(
            self.semantic_model_tester.get_config(),
            self.coarse_acoustics_model_tester.get_config(),
            self.fine_acoustics_model_tester.get_config(),
            self.codec_model_tester.get_config(),
        )

    def get_pipeline_config(self):
        config = self.get_config()

        # follow the `get_pipeline_config` of the sub component models
        config.semantic_config.vocab_size = 300
        config.coarse_acoustics_config.vocab_size = 300
        config.fine_acoustics_config.vocab_size = 300

        config.semantic_config.output_vocab_size = 300
        config.coarse_acoustics_config.output_vocab_size = 300
        config.fine_acoustics_config.output_vocab_size = 300

        return config


@require_torch
class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BarkSemanticModel,) if is_torch_available() else ()
    all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()

    is_encoder_decoder = False
    fx_compatible = False
    test_missing_keys = False
    test_pruning = False
    test_model_parallel = False  # no model_parallel for now

    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = BarkSemanticModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]

            wte = model.get_input_embeddings()
            inputs["input_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = self.all_generative_model_classes[0](config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


@require_torch
class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    # Same tester as BarkSemanticModelTest, except for model_class and config_class
    all_model_classes = (BarkCoarseModel,) if is_torch_available() else ()
    all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()

    is_encoder_decoder = False
    fx_compatible = False
    test_missing_keys = False
    test_pruning = False
    test_model_parallel = False  # no model_parallel for now

    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = BarkCoarseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]

            wte = model.get_input_embeddings()
            inputs["input_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = self.all_generative_model_classes[0](config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


@require_torch
class BarkFineModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (BarkFineModel,) if is_torch_available() else ()

    is_encoder_decoder = False
    fx_compatible = False
    test_missing_keys = False
    test_pruning = False
    # no model_parallel for now
    test_model_parallel = False

    # torchscript disabled for now because forward with an int
    test_torchscript = False

    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = BarkFineModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]

            wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]]

            inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]])

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        # take first codebook channel
        model = self.all_model_classes[0](config).eval().to(torch_device)
        model.half()

        # toy generation_configs
        semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0)
        coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given)
        fine_generation_config = BarkFineGenerationConfig(
            max_fine_history_length=config.block_size // 2,
            max_fine_input_length=config.block_size,
            n_fine_codebooks=config.n_codes_total,
        )
        codebook_size = config.vocab_size - 1

        model.generate(
            input_ids,
            history_prompt=None,
            temperature=None,
            semantic_generation_config=semantic_generation_config,
            coarse_generation_config=coarse_generation_config,
            fine_generation_config=fine_generation_config,
            codebook_size=codebook_size,
        )

        model.generate(
            input_ids,
            history_prompt=None,
            temperature=0.7,
            semantic_generation_config=semantic_generation_config,
            coarse_generation_config=coarse_generation_config,
            fine_generation_config=fine_generation_config,
            codebook_size=codebook_size,
        )

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["codebook_idx", "input_ids"]
            self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_model_common_attributes(self):
        # one embedding layer per codebook
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding))
            model.set_input_embeddings(
                torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)])
            )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear))

    def test_resize_tokens_embeddings(self):
        # resizing tokens_embeddings of a ModuleList
        original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone theme
            model_embed_list = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings_list = [model_embed.weight.clone() for model_embed in model_embed_list]

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed_list = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)

            # Check that it actually resizes the embeddings matrix for each codebook
            for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed_list = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            # only check for the first embedding matrix
            models_equal = True
            for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_resize_embeddings_untied(self):
        # resizing tokens_embeddings of a ModuleList
        original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        original_config.tie_word_embeddings = False

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config).to(torch_device)

            # if no output embeddings -> leave test
            if model.get_output_embeddings() is None:
                continue

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_vocab_size = config.vocab_size
            model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            output_embeds_list = model.get_output_embeddings()

            for output_embeds in output_embeds_list:
                self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)

                # Check bias if present
                if output_embeds.bias is not None:
                    self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)

            # Check that it actually resizes the embeddings matrix
            output_embeds_list = model.get_output_embeddings()

            for output_embeds in output_embeds_list:
                self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
                # Check bias if present
                if output_embeds.bias is not None:
                    self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False ) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = {"output_hidden_states": True} if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) model.train() _ = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False ) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = 
model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_torch class BarkModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return BarkModel.from_pretrained("suno/bark").to(torch_device) @cached_property def processor(self): return BarkProcessor.from_pretrained("suno/bark") @cached_property def inputs(self): input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6") input_ids = input_ids.to(torch_device) return input_ids @cached_property def semantic_generation_config(self): semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config) return semantic_generation_config @cached_property def coarse_generation_config(self): coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config) return coarse_generation_config @cached_property def fine_generation_config(self): fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config) return fine_generation_config @slow def test_generate_semantic(self): input_ids = self.inputs expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_semantic_early_stop(self): input_ids = self.inputs min_eos_p = 0.01 expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] with torch.no_grad(): torch.manual_seed(0) output_ids_without_min_eos_p = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) torch.manual_seed(0) output_ids_kwargs = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, min_eos_p=min_eos_p, ) self.assertListEqual(output_ids_without_min_eos_p[0, : len(expected_output_ids)].tolist(), expected_output_ids) self.assertLess(len(output_ids_kwargs[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())) self.semantic_generation_config.min_eos_p = min_eos_p with torch.no_grad(): torch.manual_seed(0) output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) self.assertEqual(output_ids.shape, output_ids_kwargs.shape) self.assertLess(len(output_ids[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_coarse(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, 
semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_fine(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] expected_output_ids = [ [1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,], [367, 394, 596, 342, 504, 492, 27, 27, 822, 822,], [961, 955, 221, 955, 955, 686, 939, 939, 479, 176,], [638, 365, 218, 944, 853, 363, 639, 22, 884, 456,], [302, 912, 524, 38, 174, 209, 879, 23, 910, 227,], [440, 673, 861, 666, 372, 558, 49, 172, 232, 342,], [244, 358, 123, 356, 586, 520, 499, 877, 542, 637,], [806, 685, 905, 848, 803, 810, 921, 208, 625, 203,], ] with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) output_ids = self.model.fine_acoustics.generate( output_ids, history_prompt=history_prompt, temperature=None, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, fine_generation_config=self.fine_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids) @slow def test_generate_end_to_end(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids) self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"}) @slow def test_generate_end_to_end_with_args(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6) self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4) @slow def test_generate_batching(self): args = {"do_sample": False, "temperature": None} s1 = "I love HuggingFace" s2 = "In the light of the moon, a little egg lay on a leaf" voice_preset = "en_speaker_6" input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device) outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True) s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device) s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device) output1 = self.model.generate(**s1, **args) output2 = self.model.generate(**s2, **args) self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1])) self.assertTrue(torch.allclose(outputs[0, : audio_lengths[0]], output1.squeeze(), atol=2e-3)) self.assertTrue(torch.allclose(outputs[1, : audio_lengths[1]], output2.squeeze(), atol=2e-3)) outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True) self.assertTrue((outputs == output1).all().item()) @slow def test_generate_end_to_end_with_sub_models_args(self): input_ids = self.inputs with torch.no_grad(): torch.manual_seed(0) self.model.generate( **input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7 ) output_ids_without_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, 
coarse_do_sample=True, coarse_temperature=0.7, fine_temperature=0.3, ) output_ids_with_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, coarse_temperature=0.7, fine_temperature=0.3, min_eos_p=0.1, ) self.assertLess( len(output_ids_with_min_eos_p[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()) ) @require_torch_gpu @slow def test_generate_end_to_end_with_offload(self): input_ids = self.inputs with torch.no_grad(): output_with_no_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0) torch.cuda.empty_cache() memory_before_offload = torch.cuda.memory_allocated() model_memory_footprint = self.model.get_memory_footprint() self.model.enable_cpu_offload() memory_after_offload = torch.cuda.memory_allocated() room_for_difference = 1.1 self.assertGreater( (memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload ) self.assertEqual(self.model.device.type, torch_device) self.assertTrue(hasattr(self.model.semantic, "_hf_hook")) output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0) self.assertListEqual(output_with_no_offload.tolist(), output_with_offload.tolist())
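# Illustrative sketch, not part of the test suite: the integration tests above
# drive Bark's sub-models stage by stage (semantic -> coarse acoustics -> fine
# acoustics -> codec decode). A minimal end-to-end call, assuming the same
# public "suno/bark" checkpoint used by `BarkModelIntegrationTests`, could look like:
def _example_end_to_end_generation():
    processor = BarkProcessor.from_pretrained("suno/bark")
    model = BarkModel.from_pretrained("suno/bark").to(torch_device)
    # the processor tokenizes the text and (optionally) attaches a voice preset
    inputs = processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
    with torch.no_grad():
        # `BarkModel.generate` chains the four sub-models internally
        audio = model.generate(**inputs.to(torch_device))
    return audio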
# Copyright 2023 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The tests below cover: providing an already loaded voice_preset, loading a
# voice preset from an .npz file, and loading a voice preset from the Hub.
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class BarkProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "suno/bark-small" self.tmpdirname = tempfile.mkdtemp() self.voice_preset = "en_speaker_1" self.input_string = "This is a test string" self.speaker_embeddings_dict_path = "speaker_embeddings_path.json" self.speaker_embeddings_directory = "speaker_embeddings" def get_tokenizer(self, **kwargs): return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() processor = BarkProcessor(tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname) processor = BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) @slow def test_save_load_pretrained_additional_features(self): processor = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, ) processor.save_pretrained( self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, ) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") processor = BarkProcessor.from_pretrained( self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) def test_speaker_embeddings(self): processor = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, ) seq_len = 35 nb_codebooks_coarse = 2 nb_codebooks_total = 8 voice_preset = { "semantic_prompt": np.ones(seq_len), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } inputs = processor(text=self.input_string, voice_preset=voice_preset) processed_voice_preset = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()) tmpfilename = os.path.join(self.tmpdirname, "file.npz") np.savez(tmpfilename, **voice_preset) inputs = processor(text=self.input_string, voice_preset=tmpfilename) processed_voice_preset = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()) inputs = processor(text=self.input_string, voice_preset=self.voice_preset) def test_tokenizer(self): tokenizer = self.get_tokenizer() processor = BarkProcessor(tokenizer=tokenizer) encoded_processor = processor(text=self.input_string) encoded_tok = tokenizer( self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
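# Illustrative sketch, not part of the test suite: as `test_speaker_embeddings`
# above shows, a Bark voice preset is just three prompt arrays; saved to an
# .npz file it can be handed to the processor by path. The file name and the
# "suno/bark-small" checkpoint here mirror the fixtures used in the tests.
def _example_voice_preset_from_npz(tmp_file="voice_preset.npz"):
    voice_preset = {
        "semantic_prompt": np.ones(35),     # (seq_len,)
        "coarse_prompt": np.ones((2, 35)),  # (nb_codebooks_coarse, seq_len)
        "fine_prompt": np.ones((8, 35)),    # (nb_codebooks_total, seq_len)
    }
    np.savez(tmp_file, **voice_preset)
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    return processor(text="This is a test string", voice_preset=tmp_file)["history_prompt"]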
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The slow tests are often failing with OOM errors on GPU. Setting the XLA
# allocator to "platform" makes JAX allocate exactly what is needed on demand,
# and deallocate memory that is no longer needed (but will be slower), as
# stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
# timeout_decorator.timeout(1) is not working with the decorator so far.
# FlaxBartForSequenceClassification expects an eos token in input_ids.
# The article tests below check that we don't add any hypotheses outside of the
# top n_beams.
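# Note: the XLA_PYTHON_CLIENT_ALLOCATOR variable set below only takes effect if
# it is assigned before JAX initializes its backend, hence it is set before the
# first `import jax`.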
import unittest import numpy as np import timeout_decorator from transformers import BartConfig, BartTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import os os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers.models.bart.modeling_flax_bart import ( FlaxBartForConditionalGeneration, FlaxBartForQuestionAnswering, FlaxBartForSequenceClassification, FlaxBartModel, shift_tokens_right, ) def prepare_bart_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } class FlaxBartModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BartConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range,
use_cache=False, ) inputs_dict = prepare_bart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 
17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() model = FlaxBartForSequenceClassification(config) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids) expected_shape = (batch_size, config.num_labels) self.assertEqual(outputs["logits"].shape, expected_shape) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() model = FlaxBartForQuestionAnswering(config) outputs = model(input_ids=input_ids) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBartForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBartForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBartModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBartModel, FlaxBartForConditionalGeneration, FlaxBartForSequenceClassification, FlaxBartForQuestionAnswering, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBartForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBartModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/bart-base", from_pt=True) input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @slow def test_summarization_fast(self): model = FlaxBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-6-6") tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-6-6") input_str = ( "This sentence is made of three parts. Each part is important on its own. One part is about animals, the" " other part about planes, and the last part about housing." ) input_ids = tokenizer(input_str, return_tensors="np").input_ids sequences = model.generate(input_ids, num_beams=2, min_length=None, max_length=20).sequences output_str = tokenizer.batch_decode(sequences)[0] assert ( output_str == "</s><s>This sentence is made of three parts. One part is about animals, the other part</s>" ) @slow def test_cnn_summarization_same_as_fairseq(self): model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") FRANCE_ARTICLE = ( " Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." 
Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. 
Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." 
) SHORTER_ARTICLE = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. 
The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. 
Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. 
Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) dct = tokenizer.batch_encode_plus( [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY], max_length=1024, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="np", ) self.assertEqual(1024, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=2, ).sequences assert (hypotheses_batch[:, 1] == 0).all().item() EXPECTED = [ "A French prosecutor says he is not aware of any video footage from on board the plane. Two German" " magazines claim to have found a cell phone video showing the crash. The publications say they watched" " the video, which was found by a source close to the investigation. All 150 on board the Germanwings" " flight were killed.", "Palestinian Authority becomes 123rd member of the International Criminal Court. The move gives the court" " jurisdiction over alleged crimes in Palestinian territories. Israel and the United States opposed the" " Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said it was a" " move toward greater justice.", "U.S. and its negotiating partners reached a strong framework agreement with Iran. Peter Bergen: The" " debate that has already begun will likely result in more heat than light. Bergen: The most misleading" " assertion is that the negotiations' objective at the outset was the total elimination of any nuclear" " program.", "Liana Barrientos, 39, has been married 10 times, sometimes within two weeks of each other. Prosecutors" " say the marriages were part of an immigration scam. She pleaded not guilty at State Supreme Court in the" " Bronx on Friday. 
If convicted, Barrientos faces up to four years in prison.", ] generated_summaries = tokenizer.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated_summaries == EXPECTED class FlaxBartStandaloneDecoderModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = jnp.clip(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 3, self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = BartConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, input_ids, attention_mask = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, )
# Articles shared by the XSum 1-1 tests below; the original file repeats these
# strings inline in each test body.
ICC_ARTICLE = (
    "The Palestinian Authority officially became the 123rd member of the International Criminal Court on"
    " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The"
    " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based."
    " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its"
    " jurisdiction over alleged crimes committed \"in the occupied Palestinian territory, including East"
    " Jerusalem, since June 13, 2014.\" Later that month, the ICC opened a preliminary examination into the"
    " situation in Palestinian territories, paving the way for possible war crimes investigations against"
    " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and"
    " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the"
    " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a"
    " move toward greater justice. \"As Palestine formally becomes a State Party to the Rome Statute today,"
    " the world is also a step closer to ending a long era of impunity and injustice,\" he said, according to"
    " an ICC news release. \"Indeed, today brings us closer to our shared goals of justice and peace.\" Judge"
    " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the"
    " Palestinians. \"As the Rome Statute today enters into force for the State of Palestine, Palestine"
    " acquires all the rights as well as responsibilities that come with being a State Party to the Statute."
    " These are substantive commitments, which cannot be taken lightly,\" she said. Rights group Human Rights"
    " Watch welcomed the development. \"Governments seeking to penalize Palestine for joining the ICC should"
    " immediately end their pressure, and countries that support universal acceptance of the court's treaty"
    " should speak out to welcome its membership,\" said Balkees Jarrah, international justice counsel for"
    " the group. \"What's objectionable is the attempts to undermine international justice, not Palestine's"
    " decision to join a treaty to which over 100 countries around the world are members.\" In January, when"
    " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as"
    " an outrage, saying the court was overstepping its boundaries. The United States also said it"
    " \"strongly\" disagreed with the court's decision. \"As we have said repeatedly, we do not believe that"
    " Palestine is a state and therefore we do not believe that it is eligible to join the ICC,\" the State"
    " Department said in a statement. It urged the warring sides to resolve their differences through direct"
    " negotiations. \"We will continue to oppose actions against Israel at the ICC as counterproductive to"
    " the cause of peace,\" it said. But the ICC begs to differ with the definition of a state for its"
    " purposes and refers to the territories as \"Palestine.\" While a preliminary examination is not a"
    " formal investigation, it allows the court to review evidence and determine whether to investigate"
    " suspects on both sides. Prosecutor Fatou Bensouda said her office would \"conduct its analysis in full"
    " independence and impartiality.\" The war between Israel and Hamas militants in Gaza last summer left"
    " more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The"
    " International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war"
    " crimes."
)

GERMANWINGS_ARTICLE = (
    "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted"
    " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor"
    " Brice Robin told CNN that \"so far no videos were used in the crash investigation.\" He added, \"A"
    " person who has such a video needs to immediately give it to the investigators.\" Robin's comments"
    " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video"
    " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the"
    " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered"
    " from a phone at the wreckage site. The two publications described the supposed video, but did not post"
    " it on their websites. The publications said that they watched the video, which was found by a source"
    " close to the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match"
    " reported. \"Metallic banging can also be heard more than three times, perhaps of the pilot trying to"
    " open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the"
    " others, the screaming intensifies. Then nothing.\" \"It is a very disturbing scene,\" said Julian"
    " Reichelt, editor-in-chief of Bild online. An official with France's accident investigation agency, the"
    " BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French"
    " Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site,"
    " told CNN that the reports were \"completely wrong\" and \"unwarranted.\" Cell phones have been"
    " collected at the site, he said, but that they \"hadn't been exploited yet.\" Menichini said he"
    " believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois,"
    " near Paris, in order to be analyzed by specialized technicians working hand-in-hand with"
    " investigators. But none of the cell phones found so far have been sent to the institute, Menichini"
    " said. Asked whether staff involved in the search could have leaked a memory card to the media,"
    " Menichini answered with a categorical \"no.\" Reichelt told \"Erin Burnett: Outfront\" that he had"
    " watched the video and stood by the report, saying Bild and Paris Match are \"very confident\" that the"
    " clip is real. He noted that investigators only revealed they'd recovered cell phones from the crash"
    " site after Bild and Paris Match published their reports. \"That is something we did not know before."
    " ... Overall we can say many things of the investigation weren't revealed by the investigation at the"
    " beginning,\" he said. What was mental state of Germanwings co-pilot? German airline Lufthansa"
    " confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the"
    " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the"
    " French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a \"previous episode"
    " of severe depression,\" the airline said Tuesday. Email correspondence between Lubitz and the school"
    " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in"
    " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent"
    " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and"
    " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%"
    " fit to fly, described its statement Tuesday as a \"swift and seamless clarification\" and said it was"
    " sharing the information and documents -- including training and medical records -- with public"
    " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for"
    " the past week to recover human remains and plane debris scattered across a steep mountainside. He saw"
    " the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the"
    " crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN"
    " late Tuesday that no visible human remains were left at the site but recovery teams would keep"
    " searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to"
    " identify all the victims using DNA analysis by the end of the week, sooner than authorities had"
    " previously suggested. In the meantime, the recovery of the victims' personal belongings will start"
    " Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the"
    " 144 passengers and six crew on board. Check out the latest from our correspondents. The details about"
    " Lubitz's correspondence with the flight school during his training were among several developments as"
    " investigators continued to delve into what caused the crash and Lubitz's possible motive for downing"
    " the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had"
    " passed all his examinations and \"held all the licenses required.\" Earlier, a spokesman for the"
    " prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from"
    " suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got"
    " his pilot's license. Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting"
    " aggressively before the crash. Investigators are looking into whether Lubitz feared his medical"
    " condition would cause him to lose his pilot's license, a European government official briefed on the"
    " investigation told CNN on Tuesday. While flying was \"a big part of his life,\" the source said, it's"
    " only one theory being considered. Another source, a law enforcement official briefed on the"
    " investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the"
    " plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz's"
    " girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed"
    " him unfit to work recently and concluded he had psychological issues, the European government official"
    " said. But no matter what details emerge about his previous mental health struggles, there's more to"
    " the story, said Brian Russell, a forensic psychologist. \"Psychology can explain why somebody would"
    " turn rage inward on themselves about the fact that maybe they weren't going to keep doing their job"
    " and they're upset about that and so they're suicidal,\" he said. \"But there is no mental illness that"
    " explains why somebody then feels entitled to also take that rage and turn it outward on 149 other"
    " people who had nothing to do with the person's problems.\" Germanwings crash compensation: What we"
    " know. Who was the captain of Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and"
    " Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen,"
    " Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report."
)

@slow
@require_tf
class FasterTFBartModelIntegrationTests(unittest.TestCase):
    """These tests are useful for debugging since they operate on a model with 1 encoder layer and 1 decoder layer."""

    @cached_property
    def tok(self):
        return BartTokenizer.from_pretrained("facebook/bart-large")

    @cached_property
    def xsum_1_1_model(self):
        return TFBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1")

    def test_xsum_1_1_generation(self):
        model = self.xsum_1_1_model
        assert model.model.decoder.embed_tokens == model.model.shared
        EXPECTED = (
            " The International Criminal Court (ICC) has announced that it has been announced by the International"
            " Criminal Court."
        )
        dct = self.tok(ICC_ARTICLE, return_tensors="tf")
        generated_ids = model.generate(**dct, num_beams=4)
        result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0]
        assert result == EXPECTED

    def test_xsum_1_1_xla_generation(self):
        # same test as above, but with no_repeat_ngram_size=0 (not compatible with XLA) and XLA comparison enabled
        model = self.xsum_1_1_model
        assert model.model.decoder.embed_tokens == model.model.shared
        EXPECTED = (
            " The International Criminal Court (ICC) has announced that it is to be investigated by the"
            " International Criminal Court (ICC) over allegations of war crimes."
        )
        dct = self.tok(ICC_ARTICLE, return_tensors="tf")
        generated_ids = model.generate(**dct, num_beams=4, no_repeat_ngram_size=0)
        result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0]
        assert result == EXPECTED

        xla_generate = tf.function(model.generate, jit_compile=True)
        generated_ids = xla_generate(**dct, num_beams=4, no_repeat_ngram_size=0)
        result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0]
        assert result == EXPECTED

    def test_xsum_1_1_batch_generation(self):
        batch = self.tok(
            [ICC_ARTICLE, GERMANWINGS_ARTICLE],
            return_tensors="tf",
            padding="longest",
            truncation=True,
        )
        generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4)
        result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)
        assert (
            result[0]
            == " The International Criminal Court (ICC) has announced that it has been announced by the"
            " International Criminal Court."
        )
        assert (
            result[1]
            == " An investigation into the crash that killed at least 10 people in the French capital has been"
            " released by the French police investigating the crash."
        )

    def test_encoder_equiv(self):
        batch = self.tok(
            [ICC_ARTICLE, GERMANWINGS_ARTICLE],
            return_tensors="tf",
            padding="longest",
            truncation=True,
        )
        features = self.xsum_1_1_model.get_encoder()(**batch).last_hidden_state

        expected = np.array([[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]])
        assert np.allclose(features[0, :3, :3].numpy(), expected, atol=1e-3)
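# Illustrative only (not part of the original suite): BART's forced-BOS behavior
# during generation (cf. the `hypotheses_batch[:, 1] == 0` assertion in the Flax
# summarization test above) boils down to pinning the first real decoded token.
# The helper below is a sketch using the public `forced_bos_token_id` generate
# argument; the helper name and wiring are assumptions, not the suite's own code.
def _sketch_forced_bos_generation(model, tokenizer, text):
    inputs = tokenizer(text, return_tensors="tf")
    generated = model.generate(**inputs, num_beams=4, forced_bos_token_id=model.config.bos_token_id)
    # Position 0 is decoder_start_token_id; position 1 must be the forced BOS token.
    assert (generated[:, 1].numpy() == model.config.bos_token_id).all()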
from __future__ import annotations

import copy
import tempfile
import unittest

import numpy as np

from transformers import BartConfig, BartTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel

@require_tf
class TFBartModelTester:
    config_cls = BartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # ids are clipped to avoid beginning-of-sequence, end-of-sequence and pad tokens
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size),
            clip_value_min=self.eos_token_id + 1,
            clip_value_max=self.vocab_size + 1,
        )
        # explicitly add end-of-sequence to the inputs
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_bart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask,
next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask) output_from_no_past = output_from_no_past[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values) output_from_past = output_from_past[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_bart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBartModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel) if is_tf_available() else () ) all_generative_model_classes = (TFBartForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBartForConditionalGeneration, "feature-extraction": TFBartModel, "summarization": TFBartForConditionalGeneration, "text-classification": TFBartForSequenceClassification, "text2text-generation": TFBartForConditionalGeneration, "translation": TFBartForConditionalGeneration, "zero-shot": TFBartForSequenceClassification, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = True onnx_min_opset = 10 def setUp(self): self.model_tester = TFBartModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (TFBartForConditionalGeneration, TFBartModel): model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del 
inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) model(inputs) @slow def test_graph_mode_with_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (TFBartForConditionalGeneration, TFBartModel): model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_save_load_after_resize_token_embeddings(self): if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: new_tokens_size = 10 old_total_size = config.vocab_size new_total_size = old_total_size + new_tokens_size model = model_class(config=copy.deepcopy(config)) model.build() model.resize_token_embeddings(new_total_size) inputs_dict = copy.deepcopy(original_inputs_dict) ids_feat_name = None if "input_ids" in inputs_dict: ids_feat_name = "input_ids" elif "decoder_input_ids" in inputs_dict: ids_feat_name = "decoder_input_ids" else: assert False, "No input ids feature found in the inputs dict" new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) new_vocab_input_ids += old_total_size new_vocab_input_ids = new_vocab_input_ids[:, :-1] new_vocab_input_ids = tf.concat( [new_vocab_input_ids, tf.ones((tf.shape(new_vocab_input_ids)[0], 1), dtype=tf.int32) * 2], axis=1 ) inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = new_vocab_input_ids prepared_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) restored_model_outputs = model(**prepared_inputs) self.assert_outputs_same(restored_model_outputs, outputs) @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class TFBartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, 
decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, decoder_start_token_id=2, ) return config, input_ids, batch_size def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() decoder_lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size) lm_model = TFBartForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids, labels=decoder_lm_labels, decoder_input_ids=input_ids, use_cache=False) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=10, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, ) lm_model = TFBartForConditionalGeneration(config) context = tf.fill((7, 2), 4) summary = tf.fill((7, 7), 6) outputs = lm_model(input_ids=context, decoder_input_ids=summary, use_cache=False) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs.logits.shape, expected_shape) @require_tf class TFBartForSequenceClassificationTest(unittest.TestCase): def test_model_fails_for_uneven_eos_tokens(self): config = BartConfig(eos_token_id=2) model = TFBartForSequenceClassification(config) inputs = { "input_ids": tf.constant([[1, 2, 2, 2], [1, 3, 2, 2], [2, 2, 3, 3]]), "attention_mask": tf.constant([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]), } with self.assertRaises(tf.errors.InvalidArgumentError): model(inputs) @slow @require_tf class TFBartModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large").model input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.cast(tf.math.not_equal(input_ids, model.config.pad_token_id), tf.int8) output = model(input_ids=input_ids, attention_mask=attention_mask)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) expected_slice = tf.convert_to_tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3) def test_cnn_summarization_same_as_fairseq_hard(self): hf = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") tok = self.tok FRANCE_ARTICLE = ( " Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. 
\"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." 
" French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) EXPECTED_SUMMARY_FRANCE = ( "French prosecutor says he's not aware of any video footage from on board the plane. German daily Bild" " and French Paris Match claim to have found a cell phone video of the crash. A French Gendarmerie" ' spokesman calls the reports "completely wrong" and "unwarranted" German airline Lufthansa confirms' " co-pilot Andreas Lubitz had battled depression." 
) SHORTER_ARTICLE = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. 
The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) EXPECTED_SUMMARY_SHORTER = ( "The Palestinian Authority becomes the 123rd member of the International Criminal Court. The move gives" " the court jurisdiction over alleged crimes in Palestinian territories. Israel and the United States" " opposed the Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said" " it was a move toward greater justice." ) IRAN_ARTICLE = ( " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. 
If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) EXPECTED_SUMMARY_IRAN = ( "The U.S. and its negotiating partners reached a very strong framework agreement with Iran. Peter Bergen:" " The debate that has already begun will likely result in more heat than light. He says the agreement" " limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon." " Bergen says the most important aim of a nuclear deal is preventing a nuclear Iran." ) ARTICLE_SUBWAY = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. 
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) EXPECTED_SUMMARY_SUBWAY = ( "Liana Barrientos has been married 10 times, sometimes within two weeks of each other. Prosecutors say the" " marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in" " the Bronx. She was arrested and charged with theft of service and criminal trespass for allegedly" " sneaking into the subway." ) dct = tok( [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY], max_length=1024, truncation_strategy="only_first", padding="longest", truncation=True, return_tensors="tf", ) self.assertEqual(1024, dct["input_ids"].shape[1]) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], ) assert hypotheses_batch[:, 1].numpy().tolist() == [0, 0, 0, 0] decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) expected_batch = [ EXPECTED_SUMMARY_FRANCE, EXPECTED_SUMMARY_SHORTER, EXPECTED_SUMMARY_IRAN, EXPECTED_SUMMARY_SUBWAY, ] assert decoded == expected_batch @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @slow def test_contrastive_search_bart(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. 
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") input_ids = bart_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="tf" ).input_ids outputs = bart_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. " "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is " "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up " "to four years in" ], ) @slow def test_contrastive_search_bart_xla(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. 
After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") input_ids = bart_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="tf" ).input_ids xla_generate = tf.function(bart_model.generate, jit_compile=True) outputs = xla_generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64, no_repeat_ngram_size=0) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. " "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is " "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up " "to four years in" ], ) @slow @require_tf class FasterTFBartModelIntegrationTests(unittest.TestCase): @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def xsum_1_1_model(self): return TFBartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1") def test_xsum_1_1_generation(self): model = self.xsum_1_1_model assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. 
Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." ) EXPECTED = ( " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) dct = self.tok(ARTICLE, return_tensors="tf") generated_ids = model.generate(**dct, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED def test_xsum_1_1_xla_generation(self): model = self.xsum_1_1_model assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. 
The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." 
) EXPECTED = ( " The International Criminal Court (ICC) has announced that it is to be investigated by the International" " Criminal Court (ICC) over allegations of war crimes." ) dct = self.tok(ARTICLE, return_tensors="tf") generated_ids = model.generate(**dct, num_beams=4, no_repeat_ngram_size=0) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = xla_generate(**dct, num_beams=4, no_repeat_ngram_size=0) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert result == EXPECTED def test_xsum_1_1_batch_generation(self): batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. 
"We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... 
Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. 
Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="tf", padding="longest", truncation=True, ) generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True) assert ( result[0] == " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) assert ( result[1] == " An investigation into the crash that killed at least 10 people in the French capital has been" " released by the French police investigating the crash." ) def test_encoder_equiv(self): batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. 
"As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. 
An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . 
The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="tf", padding="longest", truncation=True, ) features = self.xsum_1_1_model.get_encoder()(**batch).last_hidden_state expected = np.array([[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]]) assert np.allclose(features[0, :3, :3].numpy(), expected, atol=1e-3)
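# ---------------------------------------------------------------------------
# Hedged standalone sketch (not part of the pinned tests above). The integration
# tests exercise beam search, contrastive search, and XLA-compiled generation
# through TFBartForConditionalGeneration.generate(); the guarded snippet below
# replays that call pattern in isolation. It assumes TensorFlow and network
# access to the "facebook/bart-large-cnn" checkpoint, and the decoded output is
# illustrative, not a pinned expectation.
if __name__ == "__main__":
    import tensorflow as tf

    from transformers import BartTokenizer, TFBartForConditionalGeneration

    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")

    article = "New York (CNN) -- A short article to summarize goes here."
    inputs = tokenizer(article, return_tensors="tf", truncation=True, max_length=512)

    # Beam search, as in test_xsum_1_1_generation; pass penalty_alpha/top_k
    # instead of num_beams for the contrastive-search variant tested above.
    summary_ids = model.generate(inputs.input_ids, num_beams=4, max_length=64)
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))

    # XLA-compiled generation, mirroring test_contrastive_search_bart_xla.
    xla_generate = tf.function(model.generate, jit_compile=True)
    xla_ids = xla_generate(inputs.input_ids, num_beams=4, max_length=64, no_repeat_ngram_size=0)
    print(tokenizer.batch_decode(xla_ids, skip_special_tokens=True))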
# coding=utf-8
# Copyright 2020 Ecole Polytechnique and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # fmt: skip

        # moussaKam/mbarthez is a french model so we also use french texts
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
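A minimal stand-alone sketch of the batch-encoding behaviour the test above verifies, using the same public "moussaKam/mbarthez" checkpoint. This is illustrative only (not part of the test suite) and assumes network access to the Hugging Face Hub plus `sentencepiece` and `torch` installed; the printed ids are not asserted here.

# Usage sketch: encode a batch with the BARThez tokenizer and inspect the result.
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization."],
    padding=True,
    truncation=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)  # (1, sequence_length)
# Map ids back to SentencePiece tokens to see the segmentation.
print(tokenizer.convert_ids_to_tokens(batch.input_ids[0].tolist()))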
# coding=utf-8
# Copyright 2021 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
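BARTpho's tokenizer is built from two files: a shared multilingual SentencePiece model and a monolingual vocab file with one "<token> <id>" pair per line, exactly as `setUp` constructs above. The sketch below mirrors that setup outside the test harness; the SentencePiece fixture path is the repo fixture used by the test and is assumed to be available.

# Sketch of the two-file BartphoTokenizer setup (illustrative, mirrors setUp above).
import os
import tempfile

from transformers import BartphoTokenizer
from transformers.testing_utils import get_tests_dir

sample_vocab = get_tests_dir("fixtures/test_sentencepiece_bpe.model")  # repo fixture, assumed present

with tempfile.TemporaryDirectory() as tmp:
    monolingual_vocab_file = os.path.join(tmp, "dict.txt")
    with open(monolingual_vocab_file, "w", encoding="utf-8") as fp:
        for i, token in enumerate(["▁This", "▁is", "▁a", "▁t", "est"]):
            fp.write(f"{token} {i}\n")

    tokenizer = BartphoTokenizer(sample_vocab, monolingual_vocab_file, unk_token="<unk>")
    # tokenize() segments with the shared SentencePiece model; ids for pieces
    # outside the monolingual vocab resolve to the <unk> id.
    print(tokenizer.tokenize("This is a là test"))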
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()

        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()

        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
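A quick stand-alone sketch of the image-processor API these tests exercise: an image and its segmentation map go in together and come back as "pixel_values" and "labels" tensors. The constructor kwargs mirror the tester's defaults above; the synthetic PIL inputs stand in for the ADE20k fixtures, so this runs without network access (it does assume `torch` and `Pillow`).

# Usage sketch (illustrative): process an image together with its segmentation map.
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor(
    do_resize=True,
    size={"height": 20, "width": 20},
    do_center_crop=True,
    crop_size={"height": 18, "width": 18},
    do_reduce_labels=False,
)
image = Image.new("RGB", (64, 64))          # synthetic stand-in for a real photo
segmentation_map = Image.new("L", (64, 64))  # synthetic stand-in for an ADE20k map

encoding = processor(image, segmentation_map, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, 18, 18)
print(encoding["labels"].shape, encoding["labels"].dtype)  # (1, 18, 18) torch.int64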
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BEiT model. """
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_BACKBONE_MAPPING,
        MODEL_MAPPING,
        BeitBackbone,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[1, 2, 3, 4],
        out_features=["stage1", "stage2", "stage3", "stage4"],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.out_features = out_features
        self.num_labels = num_labels

        # in BEiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
            out_features=self.out_features,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_backbone(self, config, pixel_values, labels, pixel_labels):
        model = BeitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        expected_height = expected_width = self.image_size // config.patch_size
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))

        # verify backbone works with out_features=None
        config.out_features = None
        model = BeitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            BeitModel,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="BEiT does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [
                *get_values(MODEL_MAPPING),
                *get_values(MODEL_FOR_BACKBONE_MAPPING),
                BeitForMaskedImageModeling,
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class
                in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)


@require_torch
class BeitBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (BeitBackbone,) if is_torch_available() else ()
    config_class = BeitConfig

    def setUp(self):
        self.model_tester = BeitModelTester(self)
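Outside the test harness, the ImageNet-1k inference path verified above boils down to a few lines. The sketch below mirrors `test_inference_image_classification_head_imagenet_1k`; it assumes network access for the "microsoft/beit-base-patch16-224" checkpoint, and the image path is a hypothetical local file.

# Stand-alone inference sketch (illustrative), mirroring the integration test above.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
model.eval()

image = Image.open("cats.png")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
# Map the top logit to a human-readable ImageNet label.
print(model.config.id2label[logits.argmax(-1).item()])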
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BEiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BEiT's forward signature is different than text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BEiT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])

        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])

        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
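The `test_jit_compilation` pattern above, reduced to a minimal stand-alone sketch: wrap the Flax model call in `jax.jit` and run it on a random input. This is illustrative; `FlaxBeitModel(config)` here produces randomly initialized weights, the config values simply mirror the tester's tiny defaults, and `jax`, `flax`, and `numpy` are assumed installed.

# Sketch (illustrative): jit-compiling a tiny, randomly initialized Flax BEiT model.
import jax
import numpy as np
from transformers import BeitConfig, FlaxBeitModel

config = BeitConfig(
    image_size=30, patch_size=2, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = FlaxBeitModel(config)  # random weights, no checkpoint download
pixel_values = np.random.randn(1, 3, 30, 30).astype("float32")

@jax.jit
def model_jitted(pixel_values):
    # Return only the last_hidden_state array so the jitted output is a plain array.
    return model(pixel_values=pixel_values)[0]

hidden_states = model_jitted(pixel_values)
print(hidden_states.shape)  # (1, num_patches + 1, hidden_size) == (1, 226, 32)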
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Inline-comment notes for the PyTorch BERT tests below: the tester returns a tiny
# configuration by default; the decoder cache test runs a first forward pass, creates
# hypothetical multiple next tokens and extends next_input_ids, appends them to the
# input_ids, selects a random slice, and tests that the outputs are equal for that slice;
# BertForPreTraining is a special case (its regression test was failing with PyTorch 1.3);
# pad tokens are set in the input_ids to check for the warning emitted when the
# attention_mask is missing, clearing the cache first so the warning from warning_once can
# be tested; BertForMultipleChoice behaves incorrectly in JIT environments.
import os import tempfile import unittest from transformers import BertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, require_torch, require_torch_accelerator, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, logging, ) from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST class BertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) 
def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_model_for_causal_lm_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = BertLMHeadModel(config=config).to(torch_device).eval() outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForSequenceClassification(config) model.to(torch_device) 
model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = BertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BertModel, BertLMHeadModel, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BertModel, "fill-mask": BertForMaskedLM, "question-answering": BertForQuestionAnswering, "text-classification": BertForSequenceClassification, "text-generation": BertLMHeadModel, "token-classification": BertForTokenClassification, "zero-shot": BertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) 
def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_warning_if_padding_and_no_attention_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.model_tester.prepare_config_and_inputs() input_ids[0, 0] = config.pad_token_id logger = logging.get_logger("transformers.modeling_utils") logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: model = BertModel(config=config) model.to(torch_device) model.eval() 
model(input_ids, attention_mask=None, token_type_ids=token_type_ids) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class == BertForMultipleChoice: continue config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "bert.pt")) loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class BertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertModel.from_pretrained("bert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key_query(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
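# The decoder-cache checks above all follow one recipe: run a first forward pass with
# use_cache=True, feed only the hypothetical next tokens plus the returned past_key_values,
# and assert that a random slice of the outputs matches a full forward pass without cache.
# A minimal standalone sketch of that recipe; the config sizes mirror the tester's defaults,
# while the 5-token prompt and the tolerance are illustrative assumptions.
import torch
from transformers import BertConfig, BertLMHeadModel

tiny_config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, is_decoder=True,  # caching requires a decoder configuration
)
model = BertLMHeadModel(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (1, 5))
next_token = torch.randint(0, tiny_config.vocab_size, (1, 1))
with torch.no_grad():
    # First pass: cache the key/value states of the prompt.
    past_key_values = model(input_ids, use_cache=True).past_key_values
    # Cached pass: only the new token, plus the cache.
    cached = model(next_token, past_key_values=past_key_values, output_hidden_states=True).hidden_states[0]
    # Reference pass: the full sequence, no cache.
    full = model(torch.cat([input_ids, next_token], dim=-1), output_hidden_states=True).hidden_states[0]
# Compare the new token's states against the last position of the no-cache pass,
# as the tests above do (they use hidden_states[0] and atol=1e-3).
assert torch.allclose(cached[:, 0], full[:, -1], atol=1e-3)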
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Inline-comment note for the Flax BERT tests below: test_model_from_pretrained only checks
# the base model; that is not necessary for all model classes and will also help speed up
# the tests.
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class FlaxBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax 
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxBertModelTester(self) @slow def test_model_from_pretrained(self): model = FlaxBertModel.from_pretrained("bert-base-cased") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
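# The Flax tester above builds plain numpy inputs and never downloads weights. A minimal
# sketch of exercising a randomly initialized tiny FlaxBertModel the same way; the config
# sizes mirror the tester's defaults, and the input shape is an illustrative assumption.
import numpy as np
from transformers import BertConfig, FlaxBertModel

tiny_config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(tiny_config)  # random init, no checkpoint download
outputs = model(np.ones((1, 7), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 7, 32)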
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Inline-comment notes for the TF BERT tests below: create_and_check_model_as_decoder also
# checks the case where encoder outputs are not passed; the causal-LM cache tests all run a
# first forward pass, create a hypothetical next token and extend next_input_ids (and the
# attn_mask where relevant), append to the input_ids, select a random slice, and test that
# the outputs are equal for that slice; the attention-mask variant additionally changes a
# random masked slice of input_ids; the base model of the causal LM model is tested with
# is_decoder=True, no cross-attention, and no encoder outputs; the base model as a decoder
# of an encoder-decoder architecture is tested with is_decoder=True, cross-attention, and
# encoder outputs passed; the causal LM model is tested both standalone and as a decoder,
# with past_key_values, with past_key_values and an attention_mask, and with past_key_values
# and a longer decoder sequence length; create_and_check_decoder_model_past_large_inputs is
# similar to test_causal_lm_model_past_with_large_inputs but with cross-attention;
# BertForPreTraining is a special case; the skipped ONNX compliancy test is marked
# "TODO joao: fix me" because compliancy broke with TF 2.10.
from __future__ import annotations import unittest from transformers import BertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING from transformers.models.bert.modeling_tf_bert import ( TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertModel, ) class TFBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length],
vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = 
tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForNextSentencePrediction(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = 
model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFBertModel, TFBertForMaskedLM, TFBertLMHeadModel, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFBertModel, "fill-mask": TFBertForMaskedLM, "question-answering": TFBertForQuestionAnswering, "text-classification": TFBertForSequenceClassification, "text-generation": TFBertLMHeadModel, "token-classification": TFBertForTokenClassification, "zero-shot": TFBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = True onnx_min_opset = 
10 def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict def setUp(self): self.model_tester = TFBertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_causal_lm_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model(*config_and_inputs) def test_causal_lm_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs) def test_causal_lm_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def 
test_model_from_pretrained(self): model = TFBertModel.from_pretrained("jplu/tiny-tf-bert-random") self.assertIsNotNone(model) def test_custom_load_tf_weights(self): model, output_loading_info = TFBertForTokenClassification.from_pretrained( "jplu/tiny-tf-bert-random", output_loading_info=True ) self.assertEqual(sorted(output_loading_info["unexpected_keys"]), []) for layer in output_loading_info["missing_keys"]: self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"]) @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass @require_tf class TFBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFBertForPreTraining.from_pretrained("lysandre/tiny-bert-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 32000] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
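# create_and_check_causal_lm_model_past_with_attn_mask above swaps one random position of
# input_ids through a broadcast boolean condition, which is easy to misread. A minimal
# standalone sketch of the same trick; the shapes and the fixed position are illustrative
# assumptions.
import tensorflow as tf

batch_size, seq_length = 2, 5
input_ids = tf.fill((batch_size, seq_length), 7)
replacement_ids = tf.fill((batch_size, seq_length), 99)
position_to_change = 3
# Per-position flags, broadcast over the batch: (seq,) -> (seq, 1) -> (seq, batch) -> (batch, seq).
vector_condition = tf.range(seq_length) == position_to_change
condition = tf.transpose(tf.broadcast_to(tf.expand_dims(vector_condition, -1), (seq_length, batch_size)))
# Only column 3 is taken from replacement_ids; every other position keeps input_ids.
print(tf.where(condition, replacement_ids, input_ids))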
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
    # so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)]
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]  # repeat for when fast_bert_tokenizer=False
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled,
            # so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
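A minimal standalone sketch of the eager-vs-graph equivalence check performed by test_graph_mode above: wrap the callable in tf.function, run both modes on the same inputs, and require exact equality, since tokenizers emit integer ids and need no tolerance. Here toy_tokenizer is a hypothetical stand-in for TFBertTokenizer; any string-to-int graph op reproduces the pattern.

import tensorflow as tf


def toy_tokenizer(x):
    # stand-in for an in-graph tokenizer: maps strings to integers inside the TF graph
    return tf.strings.length(x)


compiled_tokenizer = tf.function(toy_tokenizer)
inputs = tf.constant(["short", "a somewhat longer sentence"])

# compiled (graph-mode) and eager outputs must match exactly for integer outputs
assert bool(tf.reduce_all(compiled_tokenizer(inputs) == toy_tokenizer(inputs)))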
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        **kwargs,
    ):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
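A minimal standalone sketch of the cache-equivalence technique behind create_and_check_decoder_model_past_large_inputs above: decode the full sequence in one pass, then again incrementally with past_key_values, and check that the outputs for the new tokens agree up to a tolerance. The tiny hyperparameters below are illustrative, not the values the test suite uses.

import torch

from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, is_decoder=True
)
model = BertGenerationDecoder(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 5))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))
full_ids = torch.cat([input_ids, next_tokens], dim=-1)

with torch.no_grad():
    # cache the prefix, then feed only the new tokens with the cache attached
    past_key_values = model(input_ids, use_cache=True).past_key_values
    incremental_logits = model(next_tokens, past_key_values=past_key_values).logits
    # reference: one full pass, keeping only the positions of the new tokens
    full_logits = model(full_ids).logits[:, -3:]

assert torch.allclose(incremental_logits, full_logits, atol=1e-3)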
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]  # fmt: skip

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # fmt: skip

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
codingutf8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch bigbird model import unittest from transformers import bigbirdconfig istorchavailable from transformers models auto import getvalues from transformers models bigbird tokenizationbigbird import bigbirdtokenizer from transformers testingutils import requiretorch slow torchdevice from testconfigurationcommon import configtester from testmodelingcommon import modeltestermixin floatstensor idstensor randomattentionmask from testpipelinemixin import pipelinetestermixin if istorchavailable import torch from transformers import modelforpretrainingmapping bigbirdforcausallm bigbirdformaskedlm bigbirdformultiplechoice bigbirdforpretraining bigbirdforquestionanswering bigbirdforsequenceclassification bigbirdfortokenclassification bigbirdmodel from transformers models bigbird modelingbigbird import bigbirdpretrainedmodelarchivelist class bigbirdmodeltester def init self parent batchsize7 seqlength128 istrainingtrue useinputmasktrue usetokentypeidstrue uselabelstrue vocabsize99 hiddensize32 numhiddenlayers2 numattentionheads4 intermediatesize37 hiddenactgelunew hiddendropoutprob0 1 attentionprobsdropoutprob0 1 maxpositionembeddings256 typevocabsize16 typesequencelabelsize2 initializerrange0 02 numlabels3 numchoices4 attentiontypeblocksparse usebiastrue rescaleembeddingsfalse blocksize8 numrandblocks3 positionembeddingtypeabsolute scopenone self parent parent self batchsize batchsize self seqlength seqlength self istraining istraining self useinputmask useinputmask self usetokentypeids usetokentypeids self uselabels uselabels self vocabsize vocabsize self hiddensize hiddensize self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self intermediatesize intermediatesize self hiddenact hiddenact self hiddendropoutprob hiddendropoutprob self attentionprobsdropoutprob attentionprobsdropoutprob self maxpositionembeddings maxpositionembeddings self typevocabsize typevocabsize self typesequencelabelsize typesequencelabelsize self initializerrange initializerrange self numlabels numlabels self numchoices numchoices self scope scope self attentiontype attentiontype self usebias usebias self rescaleembeddings rescaleembeddings self blocksize blocksize self numrandblocks numrandblocks self positionembeddingtype positionembeddingtype def prepareconfigandinputsself inputids idstensorself batchsize self seqlength self vocabsize inputmask none if self useinputmask inputmask randomattentionmaskself batchsize self seqlength tokentypeids none if self usetokentypeids tokentypeids idstensorself batchsize self seqlength self typevocabsize sequencelabels none tokenlabels none choicelabels none if self uselabels sequencelabels idstensorself batchsize self typesequencelabelsize tokenlabels idstensorself batchsize self seqlength self numlabels choicelabels idstensorself batchsize self numchoices config self getconfig return config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels def getconfigself return bigbirdconfig 
vocabsizeself vocabsize hiddensizeself hiddensize numhiddenlayersself numhiddenlayers numattentionheadsself numattentionheads intermediatesizeself intermediatesize hiddenactself hiddenact hiddendropoutprobself hiddendropoutprob attentionprobsdropoutprobself attentionprobsdropoutprob maxpositionembeddingsself maxpositionembeddings typevocabsizeself typevocabsize isencoderdecoderfalse initializerrangeself initializerrange attentiontypeself attentiontype usebiasself usebias rescaleembeddingsself rescaleembeddings blocksizeself blocksize numrandomblocksself numrandblocks positionembeddingtypeself positionembeddingtype def prepareconfigandinputsfordecoderself config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels self prepareconfigandinputs config isdecoder true encoderhiddenstates floatstensorself batchsize self seqlength self hiddensize encoderattentionmask idstensorself batchsize self seqlength vocabsize2 return config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates encoderattentionmask def createandcheckmodel self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdmodelconfigconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids result modelinputids tokentypeidstokentypeids result modelinputids self parent assertequalresult lasthiddenstate shape self batchsize self seqlength self hiddensize def createandcheckforpretraining self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdforpretrainingconfigconfig model totorchdevice model eval result model inputids attentionmaskinputmask tokentypeidstokentypeids labelstokenlabels nextsentencelabelsequencelabels self parent assertequalresult predictionlogits shape self batchsize self seqlength self vocabsize self parent assertequalresult seqrelationshiplogits shape self batchsize config numlabels def createandcheckmodelasdecoder self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates encoderattentionmask config addcrossattention true model bigbirdmodelconfig model totorchdevice model eval result model inputids attentionmaskinputmask tokentypeidstokentypeids encoderhiddenstatesencoderhiddenstates encoderattentionmaskencoderattentionmask result model inputids attentionmaskinputmask tokentypeidstokentypeids encoderhiddenstatesencoderhiddenstates result modelinputids attentionmaskinputmask tokentypeidstokentypeids self parent assertequalresult lasthiddenstate shape self batchsize self seqlength self hiddensize def createandcheckforcausallm self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates encoderattentionmask model bigbirdforcausallmconfigconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids labelstokenlabels self parent assertequalresult logits shape self batchsize self seqlength self vocabsize def createandcheckformaskedlm self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdformaskedlmconfigconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids labelstokenlabels self parent assertequalresult logits shape self batchsize self seqlength self vocabsize def createandcheckdecodermodelpastlargeinputs self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates 
encoderattentionmask config isdecoder true config addcrossattention true model bigbirdforcausallmconfigconfig model totorchdevice model eval first forward pass outputs model inputids attentionmaskinputmask encoderhiddenstatesencoderhiddenstates encoderattentionmaskencoderattentionmask usecachetrue pastkeyvalues outputs pastkeyvalues create hypothetical multiple next token and extent to nextinputids nexttokens idstensorself batchsize 3 config vocabsize nextmask idstensorself batchsize 3 vocabsize2 append to next inputids and nextinputids torch catinputids nexttokens dim1 nextattentionmask torch catinputmask nextmask dim1 outputfromnopast model nextinputids attentionmasknextattentionmask encoderhiddenstatesencoderhiddenstates encoderattentionmaskencoderattentionmask outputhiddenstatestrue hiddenstates0 outputfrompast model nexttokens attentionmasknextattentionmask encoderhiddenstatesencoderhiddenstates encoderattentionmaskencoderattentionmask pastkeyvaluespastkeyvalues outputhiddenstatestrue hiddenstates0 select random slice randomsliceidx idstensor1 outputfrompast shape1 item outputfromnopastslice outputfromnopast 3 randomsliceidx detach outputfrompastslice outputfrompast randomsliceidx detach self parent asserttrueoutputfrompastslice shape1 nexttokens shape1 test that outputs are equal for slice self parent asserttruetorch allcloseoutputfrompastslice outputfromnopastslice atol1e3 def createandcheckforquestionanswering self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdforquestionansweringconfigconfig model totorchdevice model eval result model inputids attentionmaskinputmask tokentypeidstokentypeids startpositionssequencelabels endpositionssequencelabels self parent assertequalresult startlogits shape self batchsize self seqlength self parent assertequalresult endlogits shape self batchsize self seqlength def createandcheckforsequenceclassification self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels config numlabels self numlabels model bigbirdforsequenceclassificationconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids labelssequencelabels self parent assertequalresult logits shape self batchsize self numlabels def createandcheckfortokenclassification self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels config numlabels self numlabels model bigbirdfortokenclassificationconfigconfig model totorchdevice model eval result modelinputids attentionmaskinputmask tokentypeidstokentypeids labelstokenlabels self parent assertequalresult logits shape self batchsize self seqlength self numlabels def createandcheckformultiplechoice self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels config numchoices self numchoices model bigbirdformultiplechoiceconfigconfig model totorchdevice model eval multiplechoiceinputsids inputids unsqueeze1 expand1 self numchoices 1 contiguous multiplechoicetokentypeids tokentypeids unsqueeze1 expand1 self numchoices 1 contiguous multiplechoiceinputmask inputmask unsqueeze1 expand1 self numchoices 1 contiguous result model multiplechoiceinputsids attentionmaskmultiplechoiceinputmask tokentypeidsmultiplechoicetokentypeids labelschoicelabels self parent assertequalresult logits shape self batchsize self numchoices def prepareconfigandinputsforcommonself configandinputs self prepareconfigandinputs config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels 
configandinputs inputsdict inputids inputids tokentypeids tokentypeids attentionmask inputmask return config inputsdict def createandcheckforautopadding self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdmodelconfig model totorchdevice model eval result modelinputids self parent assertequalresult lasthiddenstate shape self batchsize self seqlength self hiddensize def createandcheckforchangetofullattn self config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels model bigbirdmodelconfig model totorchdevice model eval result modelinputids self parent assertequalresult lasthiddenstate shape self batchsize self seqlength self hiddensize the config should not be changed self parent asserttruemodel config attentiontype blocksparse requiretorch class bigbirdmodeltestmodeltestermixin pipelinetestermixin unittest testcase head masking pruning is currently not supported for big bird testheadmasking false testpruning false torchscript should be possible but takes prohibitively long to test also torchscript is not an important feature to have in the beginning testtorchscript false allmodelclasses bigbirdmodel bigbirdforpretraining bigbirdformaskedlm bigbirdforcausallm bigbirdformultiplechoice bigbirdforquestionanswering bigbirdforsequenceclassification bigbirdfortokenclassification if istorchavailable else allgenerativemodelclasses bigbirdforcausallm if istorchavailable else pipelinemodelmapping featureextraction bigbirdmodel fillmask bigbirdformaskedlm questionanswering bigbirdforquestionanswering textclassification bigbirdforsequenceclassification textgeneration bigbirdforcausallm tokenclassification bigbirdfortokenclassification zeroshot bigbirdforsequenceclassification if istorchavailable else special case for forpretraining model def prepareforclassself inputsdict modelclass returnlabelsfalse inputsdict super prepareforclassinputsdict modelclass returnlabelsreturnlabels if returnlabels if modelclass in getvaluesmodelforpretrainingmapping inputsdictlabels torch zeros self modeltester batchsize self modeltester seqlength dtypetorch long devicetorchdevice inputsdictnextsentencelabel torch zeros self modeltester batchsize dtypetorch long devicetorchdevice return inputsdict def setupself self modeltester bigbirdmodeltesterself self configtester configtesterself configclassbigbirdconfig hiddensize37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs def testforpretrainingself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforpretrainingconfigandinputs def testformaskedlmself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckformaskedlmconfigandinputs def testformultiplechoiceself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckformultiplechoiceconfigandinputs def testdecodermodelpastwithlargeinputsself configandinputs self modeltester prepareconfigandinputsfordecoder self modeltester createandcheckdecodermodelpastlargeinputsconfigandinputs def testforquestionansweringself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforquestionansweringconfigandinputs def testforsequenceclassificationself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforsequenceclassificationconfigandinputs def testfortokenclassificationself 
configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckfortokenclassificationconfigandinputs def testmodelasdecoderself configandinputs self modeltester prepareconfigandinputsfordecoder self modeltester createandcheckmodelasdecoderconfigandinputs def testmodelasdecoderwithdefaultinputmaskself this regression test was failing with pytorch 1 3 config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates encoderattentionmask self modeltester prepareconfigandinputsfordecoder inputmask none self modeltester createandcheckmodelasdecoder config inputids tokentypeids inputmask sequencelabels tokenlabels choicelabels encoderhiddenstates encoderattentionmask def testretaingradhiddenstatesattentionsself bigbird cannot keep gradients in attentions when attentiontypeblocksparse if self modeltester attentiontype originalfull super testretaingradhiddenstatesattentions slow def testmodelfrompretrainedself for modelname in bigbirdpretrainedmodelarchivelist 1 model bigbirdforpretraining frompretrainedmodelname self assertisnotnonemodel def testmodelvariousattntypeself configandinputs self modeltester prepareconfigandinputs for type in originalfull blocksparse configandinputs0 attentiontype type self modeltester createandcheckmodelconfigandinputs def testfastintegrationself fmt off inputids torch tensor 6 117 33 36 70 22 63 31 71 72 88 58 109 49 48 116 92 6 19 95 118 100 80 111 93 2 31 84 26 5 6 82 46 96 109 4 39 19 109 13 92 31 36 90 111 18 75 6 56 74 16 42 56 92 69 108 127 81 82 41 106 19 44 24 82 121 120 65 36 26 72 13 36 98 43 64 8 53 100 92 51 122 66 17 61 50 104 127 26 35 94 23 110 71 80 67 109 111 44 19 51 41 86 71 76 44 18 68 44 77 107 81 98 126 100 2 49 98 84 39 23 98 52 46 10 82 121 73 6 117 33 36 70 22 63 31 71 72 88 58 109 49 48 116 92 6 19 95 118 100 80 111 93 2 31 84 26 5 6 82 46 96 109 4 39 19 109 13 92 31 36 90 111 18 75 6 56 74 16 42 56 92 69 108 127 81 82 41 106 19 44 24 82 121 120 65 36 26 72 13 36 98 43 64 8 53 100 92 51 12 66 17 61 50 104 127 26 35 94 23 110 71 80 67 109 111 44 19 51 41 86 71 76 28 18 68 44 77 107 81 98 126 100 2 49 18 84 39 23 98 52 46 10 82 121 73 noqa e231 dtypetorch long devicetorchdevice fmt on inputids inputids self modeltester vocabsize inputids1 inputids1 1 attentionmask torch onesinputids shape devicetorchdevice attentionmask 10 0 config self modeltester prepareconfigandinputs torch manualseed0 model bigbirdmodelconfig eval totorchdevice with torch nograd hiddenstates modelinputids attentionmaskattentionmask lasthiddenstate self asserttrue torch allclose hiddenstates0 0 5 torch tensor1 4825 0 0774 0 8226 0 2962 0 9593 devicetorchdevice atol1e3 def testautopaddingself self modeltester seqlength 241 configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforautopaddingconfigandinputs def testforchangetofullattnself self modeltester seqlength 9 configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckforchangetofullattnconfigandinputs unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github 
comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantfalseself pass overwrite from common in order to skip the check on attentions def checkptflaxoutputsself fxoutputs ptoutputs modelclass tol1e5 nameoutputs attributesnone bigbirdblocksparseattention in flaxbigbird returns attentionprobs none while in pytorch version an effort was done to return attentionprobs yet to be verified if name startswithoutputs attentions return else super checkptflaxoutputsfxoutputs ptoutputs modelclass tol name attributes requiretorch slow class bigbirdmodelintegrationtestunittest testcase we can have this true once blocksparse attnprobs works accurately testattentionprobs false def getdummyinputidsself fmt off ids torch tensor 6 117 33 36 70 22 63 31 71 72 88 58 109 49 48 116 92 6 19 95 118 100 80 111 93 2 31 84 26 5 6 82 46 96 109 4 39 19 109 13 92 31 36 90 111 18 75 6 56 74 16 42 56 92 69 108 127 81 82 41 106 19 44 24 82 121 120 65 36 26 72 13 36 98 43 64 8 53 100 92 51 122 66 17 61 50 104 127 26 35 94 23 110 71 80 67 109 111 44 19 51 41 86 71 76 44 18 68 44 77 107 81 98 126 100 2 49 98 84 39 23 98 52 46 10 82 121 73 noqa e231 dtypetorch long devicetorchdevice fmt on return ids def testinferenceblocksparsepretrainingself model bigbirdforpretraining frompretrainedgooglebigbirdrobertabase attentiontypeblocksparse model totorchdevice inputids torch tensor20920 232 328 1437 1024 dtypetorch long devicetorchdevice with torch nograd outputs modelinputids predictionlogits outputs predictionlogits seqrelationshiplogits outputs seqrelationshiplogits self assertequalpredictionlogits shape torch size1 4096 50358 self assertequalseqrelationshiplogits shape torch size1 2 expectedpredictionlogitsslice torch tensor 0 5583 0 0475 0 2508 7 4423 0 7409 1 4460 0 7593 7 7010 1 9150 3 1395 5 8840 9 3498 0 1854 1 4640 2 2052 3 7968 devicetorchdevice self asserttrue torch allclosepredictionlogits0 128 132 128 132 expectedpredictionlogitsslice atol1e4 expectedseqrelationshiplogits torch tensor46 9465 47 9517 devicetorchdevice self asserttruetorch allcloseseqrelationshiplogits expectedseqrelationshiplogits atol1e4 def testinferencefullpretrainingself model bigbirdforpretraining frompretrainedgooglebigbirdrobertabase attentiontypeoriginalfull model totorchdevice inputids torch tensor20920 232 328 1437 512 dtypetorch long devicetorchdevice with torch nograd outputs modelinputids predictionlogits outputs predictionlogits seqrelationshiplogits outputs seqrelationshiplogits self assertequalpredictionlogits shape torch size1 512 4 50358 self assertequalseqrelationshiplogits shape torch size1 2 expectedpredictionlogitsslice torch tensor 0 1499 1 1217 0 1990 8 4499 2 7757 3 0687 4 8577 7 5156 1 5446 0 1982 4 3016 10 4281 1 3705 4 0130 3 9629 5 1526 devicetorchdevice self asserttrue torch allclosepredictionlogits0 128 132 128 132 expectedpredictionlogitsslice atol1e4 expectedseqrelationshiplogits torch tensor41 4503 41 2406 devicetorchdevice self asserttruetorch allcloseseqrelationshiplogits expectedseqrelationshiplogits atol1e4 def testblocksparseattentionprobsself if not self testattentionprobs return model bigbirdmodel frompretrained googlebigbirdrobertabase attentiontypeblocksparse numrandomblocks3 blocksize16 model totorchdevice model eval config model config inputids self getdummyinputids hiddenstates model embeddingsinputids batchsize seqlen hiddenstates size attnmask torch onesbatchsize seqlen devicetorchdevice dtypetorch float toseqlength fromseqlength seqlen fromblocksize toblocksize config blocksize 
blockedmask bandmask frommask tomask model createmasksforblocksparseattn attnmask config blocksize fromblockedmask toblockedmask blockedmask for i in rangeconfig numhiddenlayers pointer model encoder layeri attention self querylayer pointer transposeforscorespointer queryhiddenstates keylayer pointer transposeforscorespointer keyhiddenstates valuelayer pointer transposeforscorespointer valuehiddenstates contextlayer attentionprobs pointer bigbirdblocksparseattention querylayer keylayer valuelayer bandmask frommask tomask fromblockedmask toblockedmask pointer numattentionheads pointer numrandomblocks pointer attentionheadsize fromblocksize toblocksize batchsize fromseqlength toseqlength seedpointer seed planfromlengthnone plannumrandblocksnone outputattentionstrue contextlayer contextlayer contiguous viewbatchsize fromseqlength 1 cl torch einsumbhqk bhkdbhqd attentionprobs valuelayer cl cl viewcontextlayer size self asserttruetorch allclosecontextlayer cl atol0 001 def testblocksparsecontextlayerself model bigbirdmodel frompretrained googlebigbirdrobertabase attentiontypeblocksparse numrandomblocks3 blocksize16 model totorchdevice model eval config model config inputids self getdummyinputids dummyhiddenstates model embeddingsinputids attnmask torch oneslikeinputids devicetorchdevice blockedmask bandmask frommask tomask model createmasksforblocksparseattn attnmask config blocksize targetedcl torch tensor 0 1870 1 5248 0 2333 0 0483 0 0952 1 8359 0 0142 0 1239 0 0083 0 0045 0 0601 0 1243 0 1329 0 1524 0 2347 0 0894 0 2248 0 2461 0 0645 0 0109 0 0418 0 1463 0 1290 0 1638 0 2489 0 0799 0 2341 0 2406 0 0524 0 0106 0 1859 1 5182 0 2324 0 0473 0 0952 1 8295 0 0148 0 1242 0 0080 0 0045 0 1879 1 5300 0 2334 0 0480 0 0967 1 8428 0 0137 0 1256 0 0087 0 0050 0 1852 1 5149 0 2330 0 0492 0 0936 1 8236 0 0154 0 1210 0 0080 0 0048 0 1857 1 5186 0 2331 0 0484 0 0940 1 8285 0 0148 0 1224 0 0077 0 0045 0 1884 1 5336 0 2334 0 0469 0 0974 1 8477 0 0132 0 1266 0 0085 0 0046 0 1881 1 5308 0 2334 0 0479 0 0969 1 8438 0 0136 0 1258 0 0088 0 0050 0 1849 1 5143 0 2329 0 0491 0 0930 1 8230 0 0156 0 1209 0 0074 0 0047 0 1878 1 5299 0 2333 0 0472 0 0967 1 8434 0 0137 0 1257 0 0084 0 0048 0 1873 1 5260 0 2333 0 0478 0 0961 1 8383 0 0142 0 1245 0 0083 0 0048 0 1849 1 5145 0 2327 0 0491 0 0935 1 8237 0 0156 0 1215 0 0083 0 0046 0 1866 1 5232 0 2332 0 0488 0 0950 1 8342 0 0143 0 1237 0 0084 0 0047 devicetorchdevice contextlayer model encoder layer0 attention self dummyhiddenstates bandmaskbandmask frommaskfrommask tomasktomask fromblockedmaskblockedmask toblockedmaskblockedmask contextlayer contextlayer0 self assertequalcontextlayer shape torch size1 128 768 self asserttruetorch allclosecontextlayer0 64 78 300 310 targetedcl atol0 0001 def testtokenizerinferenceself tokenizer bigbirdtokenizer frompretrainedgooglebigbirdrobertabase model bigbirdmodel frompretrained googlebigbirdrobertabase attentiontypeblocksparse numrandomblocks3 blocksize16 model totorchdevice text transformerbased models are unable to process long sequences due to their selfattention operation which scales quadratically with the sequence length to address this limitation we introduce the longformer with an attention mechanism that scales linearly with sequence length making it easy to process documents of thousands of tokens or longer longformers attention mechanism is a dropin replacement for the standard selfattention and combines a local windowed attention with a task motivated global attention following prior work on longsequence transformers we 
evaluate longformer on characterlevel language modeling and achieve stateoftheart results on text8 and enwik8 in contrast to most prior work we also pretrain longformer and finetune it on a variety of downstream tasks our pretrained longformer consistently outperforms roberta on long document tasks and sets new stateoftheart results on wikihop and triviaqa inputs tokenizertext for k in inputs inputsk torch tensorinputsk devicetorchdevice dtypetorch long prediction modelinputs prediction prediction0 self assertequalprediction shape torch size1 199 768 expectedprediction torch tensor 0 1887 0 0474 0 2604 0 1453 0 0651 0 1999 0 1797 0 1161 0 2833 0 3036 0 6910 0 1123 0 2836 0 4644 0 0111 0 1530 0 3919 0 2823 0 4192 0 1687 0 2168 0 1956 0 4050 0 0925 0 2597 0 0884 0 1258 0 1119 0 1127 0 1203 0 1924 0 2859 0 1362 0 1315 0 2693 0 1027 0 3169 0 2266 0 4419 0 6740 0 2366 0 1452 0 2589 0 0579 0 0358 0 2021 0 3112 0 1392 devicetorchdevice self asserttruetorch allcloseprediction0 52 64 320 324 expectedprediction atol1e4 def testinferencequestionansweringself tokenizer bigbirdtokenizer frompretrainedgooglebigbirdbasetriviaitc model bigbirdforquestionanswering frompretrained googlebigbirdbasetriviaitc attentiontypeblocksparse blocksize16 numrandomblocks3 model totorchdevice context the bigbird model was proposed in big bird transformers for longer sequences by zaheer manzil and guruganesh guru and dubey kumar avinava and ainslie joshua and alberti chris and ontanon santiago and pham philip and ravula anirudh and wang qifan and yang li and others bigbird is a sparseattention based transformer which extends transformer based models such as bert to much longer sequences in addition to sparse attention bigbird also applies global attention as well as random attention to the input sequence theoretically it has been shown that applying sparse global and random attention approximates full attention while being computationally much more efficient for longer sequences as a consequence of the capability to handle longer context bigbird has shown improved performance on various long document nlp tasks such as question answering and summarization compared to bert or roberta question which is better for longer sequences bigbird or bert what is the benefit of using bigbird over bert inputs tokenizer question context context paddingtrue returntensorspt addspecialtokenstrue maxlength256 truncationtrue inputs k v totorchdevice for k v in inputs items startlogits endlogits modelinputs totuple fmt off targetstartlogits torch tensor 8 5622 9 6209 14 3351 8 7032 11 8596 7 7446 9 6730 13 6063 8 9651 11 7417 8 2641 8 7056 13 4116 5 6600 8 8316 10 4148 12 2180 7 7979 12 5274 6 0685 10 3373 11 3128 6 6456 14 4030 6 8292 14 5383 11 5638 6 3326 11 5293 1 8434 10 0013 7 6150 10 7384 13 1179 10 1837 13 7700 10 0186 11 7335 13 3411 10 0188 13 4235 9 9381 10 4252 13 1281 8 2022 10 4326 11 5542 14 1549 10 7546 13 4691 8 2744 11 4324 13 3773 9 8284 14 5825 8 7471 14 7050 8 0364 11 3627 6 4638 11 7031 14 3446 9 9425 8 0088 noqa e231 devicetorchdevice targetendlogits torch tensor 12 1736 8 8487 14 8877 11 6713 15 1165 12 2396 7 6828 15 4153 12 2528 14 3671 12 3596 7 4272 14 9615 13 6356 11 7939 9 9767 14 8112 8 9567 15 8798 11 5291 9 4249 14 7544 7 9387 16 2789 8 9702 15 3111 11 5585 7 9992 4 1127 10 3209 8 3926 10 2005 11 1375 15 4027 12 6861 16 9884 13 7093 10 3560 15 7228 12 9290 15 8519 13 7953 10 2460 15 7198 14 2078 12 8477 11 4861 16 1017 11 8900 16 4488 13 2959 10 3980 15 4874 10 3539 16 8263 10 9973 17 0344 9 2751 10 1196 13 
8907 12 1025 13 0628 12 8530 13 8173 noqa e321 devicetorchdevice fmt on self asserttruetorch allclosestartlogits 64 96 targetstartlogits atol1e4 self asserttruetorch allcloseendlogits 64 96 targetendlogits atol1e4 inputids inputsinputids tolist answer inputidsitorch argmaxstartlogits dim1i torch argmaxendlogits dim1i 1 for i in rangeleninputids answer tokenizer batchdecodeanswer self asserttrueanswer bigbird global attention def testfillmaskself tokenizer bigbirdtokenizer frompretrainedgooglebigbirdrobertabase model bigbirdformaskedlm frompretrainedgooglebigbirdrobertabase model totorchdevice inputids tokenizerthe goal of life is mask returntensorspt inputids totorchdevice logits modelinputids logits mask is token at 6th position predtoken tokenizer decodetorch argmaxlogits0 6 7 axis1 self assertequalpredtoken happiness def testautopaddingself model bigbirdmodel frompretrained googlebigbirdrobertabase attentiontypeblocksparse numrandomblocks3 blocksize16 model totorchdevice model eval inputids torch tensor200 10 40 2 1 devicetorchdevice dtypetorch long with torch nograd output modelinputids totuple0 fmt off target torch tensor 0 129420 0 164740 0 042422 0 336030 0 094379 0 033794 0 384590 0 229660 0 196500 0 108020 0 000154 0 168800 0 165820 0 313670 0 101240 0 035145 0 381880 0 213730 0 201080 0 077443 0 053754 0 166350 0 225520 0 272900 0 119670 0 019987 0 348670 0 199190 0 181600 0 084640 0 063636 0 187110 0 237010 0 297380 0 126300 0 020025 0 268490 0 191820 0 192300 0 035077 0 073893 0 184790 0 188870 0 297860 0 134280 0 028972 0 174650 0 186890 0 180530 0 006851 0 005253 0 169360 0 123100 0 302550 0 126930 0 024188 0 133410 0 200600 0 168210 0 001006 0 093336 0 175370 0 004768 0 333170 0 114330 0 034168 0 120960 0 203570 0 162810 0 005757 0 160210 0 169310 0 049064 0 331950 0 115730 0 027062 0 143600 0 205310 0 144580 0 026746 0 193200 0 156820 0 079422 0 351600 0 106450 0 032174 0 245690 0 210250 0 173480 0 043914 0 167980 0 153050 0 059764 0 357890 0 103910 0 031481 0 334190 0 208960 0 178180 0 072165 0 136990 0 156950 0 012099 0 353140 0 096996 0 025864 0 376340 0 216050 0 171820 0 089963 0 041143 0 167060 0 079754 0 353220 0 093247 0 019867 0 385810 0 214340 0 191800 0 065946 0 040373 0 158610 0 152570 0 312930 0 110590 0 012282 0 345270 0 204040 0 176500 0 064972 0 043762 0 166450 0 179500 0 317930 0 117280 0 004040 0 304490 0 201380 0 182780 0 044000 noqa e231 devicetorchdevice fmt on self assertequaloutput shape torch size1 241 768 self asserttruetorch allcloseoutput0 64 78 300 310 target atol0 0001 coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch bigbird model first forward pass create hypothetical multiple next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice the config should not be changed head masking pruning is currently not supported for big bird torchscript should be possible but takes prohibitively long to test also torchscript is not an important feature to have in the beginning 
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch BigBird model."""
import unittest from transformers import BigBirdConfig, is_torch_available from transformers.models.auto import get_values from transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.big_bird.modeling_big_bird import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST class BigBirdModelTester: def __init__( self, parent, batch_size=7, seq_length=128, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=256, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=8, num_rand_blocks=3, position_embedding_type="absolute", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.attention_type = attention_type self.use_bias = use_bias self.rescale_embeddings = rescale_embeddings self.block_size = block_size self.num_rand_blocks = num_rand_blocks self.position_embedding_type = position_embedding_type def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return BigBirdConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, 
intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_encoder_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, block_size=self.block_size, num_random_blocks=self.num_rand_blocks, position_embedding_type=self.position_embedding_type, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BigBirdModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BigBirdForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, config.num_labels)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BigBirdModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BigBirdForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): 
model = BigBirdForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = BigBirdForCausalLM(config=config) model.to(torch_device) model.eval() outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BigBirdForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BigBirdForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BigBirdForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = BigBirdForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def create_and_check_for_auto_padding( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = BigBirdModel(config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_change_to_full_attn( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = BigBirdModel(config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertTrue(model.config.attention_type == "block_sparse") @require_torch class BigBirdModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False test_torchscript = False all_model_classes = ( ( BigBirdModel, BigBirdForPreTraining, BigBirdForMaskedLM, BigBirdForCausalLM, BigBirdForMultipleChoice, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BigBirdModel, "fill-mask": BigBirdForMaskedLM, "question-answering": BigBirdForQuestionAnswering, "text-classification": BigBirdForSequenceClassification, "text-generation": BigBirdForCausalLM, "token-classification": BigBirdForTokenClassification, "zero-shot": BigBirdForSequenceClassification, } if is_torch_available() else {} ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BigBirdModelTester(self) self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_retain_grad_hidden_states_attentions(self): if self.model_tester.attention_type == "original_full": super().test_retain_grad_hidden_states_attentions() @slow def test_model_from_pretrained(self): for model_name in BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BigBirdForPreTraining.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_various_attn_type(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["original_full", "block_sparse"]: config_and_inputs[0].attention_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_fast_integration(self): input_ids = torch.tensor( [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 
61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], dtype=torch.long, device=torch_device, ) input_ids = input_ids % self.model_tester.vocab_size input_ids[1] = input_ids[1] - 1 attention_mask = torch.ones((input_ids.shape), device=torch_device) attention_mask[:, :-10] = 0 config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs() torch.manual_seed(0) model = BigBirdModel(config).eval().to(torch_device) with torch.no_grad(): hidden_states = model(input_ids, attention_mask=attention_mask).last_hidden_state self.assertTrue( torch.allclose( hidden_states[0, 0, :5], torch.tensor([1.4825, 0.0774, 0.8226, -0.2962, -0.9593], device=torch_device), atol=1e-3, ) ) def test_auto_padding(self): self.model_tester.seq_length = 241 config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_auto_padding(*config_and_inputs) def test_for_change_to_full_attn(self): self.model_tester.seq_length = 9 config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): if name.startswith("outputs.attentions"): return else: super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) @require_torch @slow class BigBirdModelIntegrationTest(unittest.TestCase): test_attention_probs = False def _get_dummy_input_ids(self): ids = torch.tensor( [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], dtype=torch.long, device=torch_device, ) return ids def test_inference_block_sparse_pretraining(self): model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse") model.to(torch_device) input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device) with torch.no_grad(): outputs = model(input_ids) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358))) self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2))) expected_prediction_logits_slice = torch.tensor( [ [-0.5583, 0.0475, -0.2508,
7.4423], [0.7409, 1.4460, -0.7593, 7.7010], [1.9150, 3.1395, 5.8840, 9.3498], [-0.1854, -1.4640, -2.2052, 3.7968], ], device=torch_device, ) self.assertTrue( torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4) ) expected_seq_relationship_logits = torch.tensor([[46.9465, 47.9517]], device=torch_device) self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4)) def test_inference_full_pretraining(self): model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full") model.to(torch_device) input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device) with torch.no_grad(): outputs = model(input_ids) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358))) self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2))) expected_prediction_logits_slice = torch.tensor( [ [0.1499, -1.1217, 0.1990, 8.4499], [-2.7757, -3.0687, -4.8577, 7.5156], [1.5446, 0.1982, 4.3016, 10.4281], [-1.3705, -4.0130, -3.9629, 5.1526], ], device=torch_device, ) self.assertTrue( torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4) ) expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device) self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4)) def test_block_sparse_attention_probs(self): if not self.test_attention_probs: return model = BigBirdModel.from_pretrained( "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16 ) model.to(torch_device) model.eval() config = model.config input_ids = self._get_dummy_input_ids() hidden_states = model.embeddings(input_ids) batch_size, seqlen, _ = hidden_states.size() attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float) to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = config.block_size blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn( attn_mask, config.block_size ) from_blocked_mask = to_blocked_mask = blocked_mask for i in range(config.num_hidden_layers): pointer = model.encoder.layer[i].attention.self query_layer = pointer.transpose_for_scores(pointer.query(hidden_states)) key_layer = pointer.transpose_for_scores(pointer.key(hidden_states)) value_layer = pointer.transpose_for_scores(pointer.value(hidden_states)) context_layer, attention_probs = pointer.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, pointer.num_attention_heads, pointer.num_random_blocks, pointer.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=pointer.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=True, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer) cl = cl.view(context_layer.size()) self.assertTrue(torch.allclose(context_layer, cl, atol=0.001)) def test_block_sparse_context_layer(self): model = BigBirdModel.from_pretrained( "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16 ) model.to(torch_device) model.eval() config = model.config input_ids = 
self._get_dummy_input_ids() dummy_hidden_states = model.embeddings(input_ids) attn_mask = torch.ones_like(input_ids, device=torch_device) blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn( attn_mask, config.block_size ) targeted_cl = torch.tensor( [ [0.1870, 1.5248, 0.2333, -0.0483, -0.0952, 1.8359, -0.0142, 0.1239, 0.0083, -0.0045], [-0.0601, 0.1243, 0.1329, -0.1524, 0.2347, 0.0894, -0.2248, -0.2461, -0.0645, -0.0109], [-0.0418, 0.1463, 0.1290, -0.1638, 0.2489, 0.0799, -0.2341, -0.2406, -0.0524, 0.0106], [0.1859, 1.5182, 0.2324, -0.0473, -0.0952, 1.8295, -0.0148, 0.1242, 0.0080, -0.0045], [0.1879, 1.5300, 0.2334, -0.0480, -0.0967, 1.8428, -0.0137, 0.1256, 0.0087, -0.0050], [0.1852, 1.5149, 0.2330, -0.0492, -0.0936, 1.8236, -0.0154, 0.1210, 0.0080, -0.0048], [0.1857, 1.5186, 0.2331, -0.0484, -0.0940, 1.8285, -0.0148, 0.1224, 0.0077, -0.0045], [0.1884, 1.5336, 0.2334, -0.0469, -0.0974, 1.8477, -0.0132, 0.1266, 0.0085, -0.0046], [0.1881, 1.5308, 0.2334, -0.0479, -0.0969, 1.8438, -0.0136, 0.1258, 0.0088, -0.0050], [0.1849, 1.5143, 0.2329, -0.0491, -0.0930, 1.8230, -0.0156, 0.1209, 0.0074, -0.0047], [0.1878, 1.5299, 0.2333, -0.0472, -0.0967, 1.8434, -0.0137, 0.1257, 0.0084, -0.0048], [0.1873, 1.5260, 0.2333, -0.0478, -0.0961, 1.8383, -0.0142, 0.1245, 0.0083, -0.0048], [0.1849, 1.5145, 0.2327, -0.0491, -0.0935, 1.8237, -0.0156, 0.1215, 0.0083, -0.0046], [0.1866, 1.5232, 0.2332, -0.0488, -0.0950, 1.8342, -0.0143, 0.1237, 0.0084, -0.0047], ], device=torch_device, ) context_layer = model.encoder.layer[0].attention.self( dummy_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_mask, to_blocked_mask=blocked_mask, ) context_layer = context_layer[0] self.assertEqual(context_layer.shape, torch.Size((1, 128, 768))) self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001)) def test_tokenizer_inference(self): tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") model = BigBirdModel.from_pretrained( "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16 ) model.to(torch_device) text = [ "Transformer-based models are unable to process long sequences due to their self-attention operation," " which scales quadratically with the sequence length. To address this limitation, we introduce the" " Longformer with an attention mechanism that scales linearly with sequence length, making it easy to" " process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in" " replacement for the standard self-attention and combines a local windowed attention with a task" " motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer" " on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In" " contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream" " tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new" " state-of-the-art results on WikiHop and TriviaQA." 
] inputs = tokenizer(text) for k in inputs: inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long) prediction = model(**inputs) prediction = prediction[0] self.assertEqual(prediction.shape, torch.Size((1, 199, 768))) expected_prediction = torch.tensor( [ [0.1887, -0.0474, 0.2604, 0.1453], [0.0651, 0.1999, 0.1797, 0.1161], [0.2833, -0.3036, 0.6910, 0.1123], [0.2836, -0.4644, -0.0111, 0.1530], [0.3919, -0.2823, 0.4192, 0.1687], [0.2168, -0.1956, 0.4050, 0.0925], [0.2597, -0.0884, 0.1258, 0.1119], [0.1127, -0.1203, 0.1924, 0.2859], [0.1362, -0.1315, 0.2693, 0.1027], [-0.3169, -0.2266, 0.4419, 0.6740], [0.2366, -0.1452, 0.2589, 0.0579], [0.0358, -0.2021, 0.3112, -0.1392], ], device=torch_device, ) self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4)) def test_inference_question_answering(self): tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc") model = BigBirdForQuestionAnswering.from_pretrained( "google/bigbird-base-trivia-itc", attention_type="block_sparse", block_size=16, num_random_blocks=3 ) model.to(torch_device) context = ( "The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and" " Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago" " and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a" " sparse-attention based transformer which extends Transformer based models, such as BERT to much longer" " sequences. In addition to sparse attention, BigBird also applies global attention as well as random" " attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and" " random attention approximates full attention, while being computationally much more efficient for longer" " sequences. As a consequence of the capability to handle longer context, BigBird has shown improved" " performance on various long document NLP tasks, such as question answering and summarization, compared" " to BERT or RoBERTa." 
) question = [ "Which is better for longer sequences- BigBird or BERT?", "What is the benefit of using BigBird over BERT?", ] inputs = tokenizer( question, [context, context], padding=True, return_tensors="pt", add_special_tokens=True, max_length=256, truncation=True, ) inputs = {k: v.to(torch_device) for k, v in inputs.items()} start_logits, end_logits = model(**inputs).to_tuple() target_start_logits = torch.tensor( [[-8.5622, -9.6209, -14.3351, -8.7032, -11.8596, -7.7446, -9.6730, -13.6063, -8.9651, -11.7417, -8.2641, -8.7056, -13.4116, -5.6600, -8.8316, -10.4148, -12.2180, -7.7979, -12.5274, -6.0685, -10.3373, -11.3128, -6.6456, -14.4030, -6.8292, -14.5383, -11.5638, -6.3326, 11.5293, -1.8434, -10.0013, -7.6150], [-10.7384, -13.1179, -10.1837, -13.7700, -10.0186, -11.7335, -13.3411, -10.0188, -13.4235, -9.9381, -10.4252, -13.1281, -8.2022, -10.4326, -11.5542, -14.1549, -10.7546, -13.4691, -8.2744, -11.4324, -13.3773, -9.8284, -14.5825, -8.7471, -14.7050, -8.0364, -11.3627, -6.4638, -11.7031, -14.3446, -9.9425, -8.0088]], device=torch_device, ) target_end_logits = torch.tensor( [[-12.1736, -8.8487, -14.8877, -11.6713, -15.1165, -12.2396, -7.6828, -15.4153, -12.2528, -14.3671, -12.3596, -7.4272, -14.9615, -13.6356, -11.7939, -9.9767, -14.8112, -8.9567, -15.8798, -11.5291, -9.4249, -14.7544, -7.9387, -16.2789, -8.9702, -15.3111, -11.5585, -7.9992, -4.1127, 10.3209, -8.3926, -10.2005], [-11.1375, -15.4027, -12.6861, -16.9884, -13.7093, -10.3560, -15.7228, -12.9290, -15.8519, -13.7953, -10.2460, -15.7198, -14.2078, -12.8477, -11.4861, -16.1017, -11.8900, -16.4488, -13.2959, -10.3980, -15.4874, -10.3539, -16.8263, -10.9973, -17.0344, -9.2751, -10.1196, -13.8907, -12.1025, -13.0628, -12.8530, -13.8173]], device=torch_device, ) self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4)) self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4)) input_ids = inputs["input_ids"].tolist() answer = [ input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1] for i in range(len(input_ids)) ] answer = tokenizer.batch_decode(answer) self.assertTrue(answer == ["BigBird", "global attention"]) def test_fill_mask(self): tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") model.to(torch_device) input_ids = tokenizer("The goal of life is [MASK] .", return_tensors="pt").input_ids.to(torch_device) logits = model(input_ids).logits pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1)) self.assertEqual(pred_token, "happiness") def test_auto_padding(self): model = BigBirdModel.from_pretrained( "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16 ) model.to(torch_device) model.eval() input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long) with torch.no_grad(): output = model(input_ids).to_tuple()[0] target = torch.tensor( [[-0.129420, -0.164740, 0.042422, -0.336030, 0.094379, 0.033794, 0.384590, 0.229660, -0.196500, 0.108020], [-0.000154, -0.168800, 0.165820, -0.313670, 0.101240, 0.035145, 0.381880, 0.213730, -0.201080, 0.077443], [0.053754, -0.166350, 0.225520, -0.272900, 0.119670, 0.019987, 0.348670, 0.199190, -0.181600, 0.084640], [0.063636, -0.187110, 0.237010, -0.297380, 0.126300, 0.020025, 0.268490, 0.191820, -0.192300, 0.035077], [0.073893, -0.184790, 0.188870, -0.297860, 0.134280, 0.028972, 0.174650, 0.186890, -0.180530, 
0.006851], [0.005253, -0.169360, 0.123100, -0.302550, 0.126930, 0.024188, 0.133410, 0.200600, -0.168210, -0.001006], [-0.093336, -0.175370, -0.004768, -0.333170, 0.114330, 0.034168, 0.120960, 0.203570, -0.162810, -0.005757], [-0.160210, -0.169310, -0.049064, -0.331950, 0.115730, 0.027062, 0.143600, 0.205310, -0.144580, 0.026746], [-0.193200, -0.156820, -0.079422, -0.351600, 0.106450, 0.032174, 0.245690, 0.210250, -0.173480, 0.043914], [-0.167980, -0.153050, -0.059764, -0.357890, 0.103910, 0.031481, 0.334190, 0.208960, -0.178180, 0.072165], [-0.136990, -0.156950, -0.012099, -0.353140, 0.096996, 0.025864, 0.376340, 0.216050, -0.171820, 0.089963], [-0.041143, -0.167060, 0.079754, -0.353220, 0.093247, 0.019867, 0.385810, 0.214340, -0.191800, 0.065946], [0.040373, -0.158610, 0.152570, -0.312930, 0.110590, 0.012282, 0.345270, 0.204040, -0.176500, 0.064972], [0.043762, -0.166450, 0.179500, -0.317930, 0.117280, -0.004040, 0.304490, 0.201380, -0.182780, 0.044000]], device=torch_device, ) self.assertEqual(output.shape, torch.Size((1, 241, 768))) self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))
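# A minimal, self-contained usage sketch (not part of the upstream test suite) showing the
# block-sparse configuration the integration tests above exercise. The model id and kwargs are
# taken verbatim from the tests; running this downloads the checkpoint. For short inputs,
# BigBird pads to a multiple of block_size internally and may fall back to full attention,
# which is what test_auto_padding and test_for_change_to_full_attn pin down.
import torch

from transformers import BigBirdModel, BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.eval()

inputs = tokenizer("BigBird scales attention linearly with sequence length.", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state  # (batch_size, seq_len, hidden_size)
print(last_hidden.shape)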
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The @slow overrides below are copied from test_modeling_flax_common because BigBird takes
# much longer than other models. check_pt_flax_outputs is overwritten from common in order to
# skip the check on attentions: bigbird_block_sparse_attention in Flax BigBird returns
# attention_probs = None, while in the PyTorch version an effort was done to return
# attention_probs (yet to be verified).
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class FlaxBigBirdModelTester(unittest.TestCase): def __init__( self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices self.rescale_embeddings = rescale_embeddings self.attention_type = attention_type self.use_bias = use_bias self.block_size = block_size self.num_random_blocks = num_random_blocks def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = BigBirdConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax 
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) test_attn_probs = False test_mismatched_shapes = False def setUp(self): self.model_tester = FlaxBigBirdModelTester(self) @slow def test_from_pretrained_save_pretrained(self): super().test_from_pretrained_save_pretrained() @slow def test_from_pretrained_with_no_automatic_init(self): super().test_from_pretrained_with_no_automatic_init() @slow def test_no_automatic_init(self): super().test_no_automatic_init() @slow def test_hidden_states_output(self): super().test_hidden_states_output() @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("google/bigbird-roberta-base") self.assertIsNotNone(model) def test_attention_outputs(self): if self.test_attn_probs: super().test_attention_outputs() @slow def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): if name.startswith("outputs.attentions"): return else: super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
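# A minimal sketch (not part of the upstream suite) of the JIT round-trip that
# test_jit_compilation above verifies: the jitted and eager forward passes must agree in
# output shapes. attention_type="original_full" is an assumption made here to sidestep
# block-size constraints on short inputs; the checkpoint download is required.
import jax

from transformers import BigBirdTokenizer, FlaxBigBirdModel

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")

inputs = tokenizer("Sparse attention scales to long documents.", return_tensors="np")

@jax.jit
def forward(input_ids, attention_mask):
    return model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state

jitted = forward(inputs["input_ids"], inputs["attention_mask"])
with jax.disable_jit():
    eager = forward(inputs["input_ids"], inputs["attention_mask"])
assert jitted.shape == eager.shape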
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To reproduce the reference decoding in test_special_tokens below:
#   wget https://github.com/google-research/bigbird/blob/master/bigbird/vocab/gpt2.model?raw=true
#   mv gpt2.model?raw=true gpt2.model
#   import tensorflow_text as tft
#   import tensorflow as tf
#   vocab_model_file = "gpt2.model"
#   tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
#   ids = tokenizer.tokenize("Paris is the [MASK].")
#   ids = tf.concat([tf.constant([65]), ids, tf.constant([66])], axis=0)
#   detokenized = tokenizer.detokenize(ids)  # should give [CLS] Paris is the [MASK]. [SEP]
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SPIECE_UNDERLINE = "▁" SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BigBirdTokenizer rust_tokenizer_class = BigBirdTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "[MASK]") self.assertEqual(len(vocab_keys), 1_004) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_full_tokenizer(self): tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" 
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # fmt: skip
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # fmt: skip

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
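
# NOTE: hypothetical sketch, not part of the original suite, showing the round trip that
# `test_special_tokens` above asserts on: `BigBirdTokenizer` wraps its input with the
# [CLS] ... [SEP] special tokens, so decoding the encoded ids reproduces the wrapped text.
def _example_special_token_round_trip():
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    input_ids = tokenizer("Paris is the [MASK].").input_ids
    # Expected per `test_special_tokens`: "[CLS] Paris is the[MASK].[SEP]"
    return tokenizer.decode(input_ids)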
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBirdPegasus model. """

import copy
import tempfile
import unittest

from transformers import BigBirdPegasusConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BigBirdPegasusForCausalLM,
        BigBirdPegasusForConditionalGeneration,
        BigBirdPegasusForQuestionAnswering,
        BigBirdPegasusForSequenceClassification,
        BigBirdPegasusModel,
        PegasusTokenizer,
    )
    from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
        BigBirdPegasusDecoder,
        BigBirdPegasusEncoder,
    )

MODEL_ID = "google/bigbird-pegasus-large-pubmed"


def prepare_bigbird_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    input_dict = {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
    input_dict = {k: input_dict[k].to(torch_device) for k in input_dict}
    return input_dict


class BigBirdPegasusModelTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        seq_length=256,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=31,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=260,
        eos_token_id=1,
        pad_token_id=0,
        bos_token_id=2,
        attention_type="block_sparse",
        use_bias=False,
        block_size=16,
        num_random_blocks=3,
        scale_embedding=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.scale_embedding = scale_embedding

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_bigbird_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return BigBirdPegasusConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            attention_type=self.attention_type,
            use_bias=self.use_bias,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            scale_embedding=self.scale_embedding,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = BigBirdPegasusEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = BigBirdPegasusDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)

    def create_and_check_model(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        decoder_input_ids = inputs_dict["decoder_input_ids"]
        result = model(input_ids, decoder_input_ids=decoder_input_ids, use_cache=True)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))


@require_torch
class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            BigBirdPegasusModel,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BigBirdPegasusForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": BigBirdPegasusForConditionalGeneration,
            "feature-extraction": BigBirdPegasusModel,
            "question-answering": BigBirdPegasusForQuestionAnswering,
            "summarization": BigBirdPegasusForConditionalGeneration,
            "text-classification": BigBirdPegasusForSequenceClassification,
            "text-generation": BigBirdPegasusForCausalLM,
            "text2text-generation": BigBirdPegasusForConditionalGeneration,
            "translation": BigBirdPegasusForConditionalGeneration,
            "zero-shot": BigBirdPegasusForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False

    # torchscript tests are not passing for now.
    # Also torchscript is not an important feature to have in the beginning.
    test_torchscript = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
            return True

        return False

    # overwrite from GenerationTesterMixin to solve problem
    # with conflicting random seeds
    def _get_input_ids_and_config(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.attention_type = "original_full"

        input_ids = inputs_dict[self.input_name]
        attention_mask = torch.ones_like(input_ids, dtype=torch.long)

        # cut to half length & take max batch_size 3
        sequence_length = input_ids.shape[-1] // 2
        input_ids = input_ids[:batch_size, :sequence_length]
        attention_mask = attention_mask[:batch_size, :sequence_length]

        # generate max 3 tokens
        max_length = input_ids.shape[-1] + 3
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    def setUp(self):
        self.model_tester = BigBirdPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_model_various_attn_type(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["original_full", "block_sparse"]:
            config_and_inputs[0].attention_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_generate_without_input_ids(self):
        if self.model_tester.attention_type == "block_sparse":
            # this test can never pass for BigBird-block-sparse attention since input_ids must be multiple of block_size
            return
        super().test_generate_without_input_ids()

    def test_retain_grad_hidden_states_attentions(self):
        if self.model_tester.attention_type == "block_sparse":
            # this test can't pass since attention matrix (which is getting returned) can't have gradients (& just 0 at many locations)
            return
        super().test_retain_grad_hidden_states_attentions()
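    # NOTE: hypothetical helper, not part of the original suite, added as a minimal sketch of
    # the constraint the skip in `test_generate_without_input_ids` above refers to: with
    # attention_type="block_sparse", the (padded) sequence length must be a multiple of
    # config.block_size, which this tester's defaults (seq_length=256, block_size=16) satisfy.
    def _example_block_size_constraint(self):
        config = self.model_tester.get_config()
        if config.attention_type == "block_sparse":
            assert self.model_tester.seq_length % config.block_size == 0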
    # BigBirdPegasusForSequenceClassification does not support inputs_embeds
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (
            BigBirdPegasusModel,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
        ):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_dict.pop("decoder_attention_mask")
        input_dict.pop("decoder_input_ids")
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(**input_dict)
        model.generate(**input_dict, do_sample=True, early_stopping=False, num_return_sequences=3)

    @slow
    def test_batched_forward_original_full(self):
        self._check_batched_forward(attn_type="original_full")

    @slow
    def test_batched_forward_block_sparse(self):
        self._check_batched_forward(attn_type="block_sparse", tolerance=1e-1)

    def _check_batched_forward(self, attn_type, tolerance=1e-3):
        config, _ = self.model_tester.prepare_config_and_inputs()
        config.max_position_embeddings = 128
        config.block_size = 16
        config.attention_type = attn_type
        model = BigBirdPegasusForConditionalGeneration(config).to(torch_device)
        model.eval()

        chunk_length = 32

        sample_with_padding = [3, 8, 11] * chunk_length + [0] * chunk_length
        sample_without_padding = [4, 7, 9, 13] * chunk_length
        target_ids_without_padding = [2, 3] * 8
        target_ids_with_padding = [7, 8] * 6 + 4 * [-100]

        attention_mask = torch.tensor(
            [[1] * 3 * chunk_length + [0] * chunk_length, [1] * 4 * chunk_length],
            device=torch_device,
            dtype=torch.long,
        )

        input_ids = torch.tensor([sample_with_padding, sample_without_padding], device=torch_device, dtype=torch.long)
        labels = torch.tensor(
            [target_ids_without_padding, target_ids_with_padding], device=torch_device, dtype=torch.long
        )

        with torch.no_grad():
            logits_batched = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).logits

        with torch.no_grad():
            logits_single_first = model(input_ids=input_ids[:1, :-chunk_length], labels=labels[:1]).logits

        self.assertTrue(torch.allclose(logits_batched[0, -3:], logits_single_first[0, -3:], atol=tolerance))

        with torch.no_grad():
            logits_single_second = model(input_ids=input_ids[1:], labels=labels[1:, :-4]).logits

        self.assertTrue(torch.allclose(logits_batched[1, -3:], logits_single_second[0, -3:], atol=tolerance))

    def test_auto_padding(self):
        ids = [[7, 6, 9] * 65]
        config, _ = self.model_tester.prepare_config_and_inputs()
        input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
        attention_mask = input_ids.new_ones(input_ids.shape)
        decoder_input_ids = torch.tensor([[33, 5, 8] * 3], device=torch_device, dtype=torch.long)

        config.block_size = 8
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        output1 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
            "logits"
        ]

        ids = [[7, 6, 9] * 65 + [0] * 5]
        input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
        attention_mask = torch.tensor([[1] * 3 * 65 + [0] * 5], device=torch_device, dtype=torch.long)
        output2 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
            "logits"
        ]

        self.assertTrue(torch.allclose(output1, output2, atol=1e-5))

    def test_for_change_to_full_attn(self):
        self.model_tester.seq_length = 9
        config, input_dict = self.model_tester.prepare_config_and_inputs()

        # automatic switch will happen
        config.attention_type = "block_sparse"
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        state_dict = model.state_dict()
        outputs1 = model(**input_dict)["logits"]

        config.attention_type = "original_full"
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        model.load_state_dict(state_dict)
        outputs2 = model(**input_dict)["logits"]

        self.assertTrue(torch.allclose(outputs1, outputs2, atol=1e-5))


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class BigBirdPegasusModelIntegrationTests(unittest.TestCase):
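    # NOTE: hypothetical sketch, not part of the original suite. It illustrates the
    # block-sparse -> full-attention fallback that `test_for_change_to_full_attn` above
    # relies on: for sequences too short to build sparse blocks, BigBirdPegasus falls back
    # to full attention, so a "block_sparse" config must match an "original_full" one.
    # All sizes below are made up to keep the model tiny.
    def _example_auto_switch_sketch(self):
        config = BigBirdPegasusConfig(
            vocab_size=99,
            d_model=32,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=4,
            decoder_attention_heads=4,
            encoder_ffn_dim=64,
            decoder_ffn_dim=64,
            attention_type="block_sparse",
            block_size=16,
            num_random_blocks=3,
        )
        model = BigBirdPegasusForConditionalGeneration(config).eval()
        short_ids = torch.tensor([[2, 7, 6, 9, 5, 8, 3, 1]])  # shorter than one block
        with torch.no_grad():
            logits = model(input_ids=short_ids, decoder_input_ids=short_ids).logits
        return logits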
bigbirdpegasusmodelintegrationtestsunittest testcase def getdummyinputidsself fmt off ids torch tensor 685 560 630 193 836 764 708 360 10 724 278 755 805 600 71 473 601 397 315 706 487 552 88 175 601 850 678 538 846 73 778 917 116 977 756 710 1023 848 432 449 851 100 985 178 756 798 660 148 911 424 289 962 266 698 640 545 544 715 245 152 676 511 460 883 184 29 803 129 129 933 54 902 551 489 757 274 336 389 618 43 443 544 889 258 322 1000 938 58 292 871 120 780 431 83 92 897 399 612 566 909 634 939 85 204 325 775 965 48 640 1013 132 973 869 181 1001 847 144 661 228 955 792 720 910 374 854 561 306 582 170 676 449 96 198 607 257 882 691 293 931 817 862 388 611 555 974 369 1000 918 202 384 513 907 371 556 955 384 24 700 131 378 99 575 932 735 124 964 595 943 740 149 210 563 412 783 42 59 706 37 779 87 44 873 12 771 308 81 33 183 129 807 276 175 555 372 185 445 489 590 287 281 638 771 516 95 227 876 270 881 297 329 20 608 841 411 451 249 181 324 1005 830 783 865 261 964 750 140 1021 599 462 890 622 844 697 529 153 926 150 111 26 465 957 890 887 118 446 596 674 873 929 229 508 764 122 327 470 288 526 840 697 153 592 42 275 553 439 208 780 167 112 350 1018 130 736 887 813 217 382 25 68 979 1008 772 235 717 999 292 727 1023 702 710 728 556 33 12 617 213 139 695 1004 422 638 669 624 489 771 540 980 218 664 822 308 175 149 950 542 580 548 808 394 74 298 920 900 815 731 947 877 772 800 778 395 540 430 200 424 62 342 866 45 803 931 89 34 646 233 768 37 769 460 291 198 895 950 255 81 447 137 190 130 210 369 292 377 348 169 885 805 177 538 324 872 509 804 115 799 30 754 290 147 274 222 341 510 515 70 358 909 557 886 766 323 624 92 342 424 552 972 663 415 658 711 968 275 861 44 84 434 810 94 175 406 202 858 499 481 988 330 541 1004 210 618 955 897 983 576 17 107 165 607 537 629 192 196 308 137 953 860 94 892 751 88 161 148 585 456 88 14 315 594 121 885 952 833 716 733 933 282 801 427 783 471 285 277 979 325 535 228 891 596 648 969 574 654 518 257 137 208 464 950 140 5 424 349 942 283 587 821 1007 434 220 820 740 874 787 374 291 564 671 438 827 940 824 509 1021 787 942 856 450 327 491 54 817 95 60 337 667 637 164 571 946 107 202 301 782 890 839 551 680 649 14 1017 904 721 1017 535 505 848 986 777 740 775 210 456 469 474 963 573 401 57 883 750 664 281 5 613 1005 306 344 543 567 154 789 354 358 698 408 412 30 930 372 822 632 948 855 503 8 618 1010 138 695 897 852 377 933 722 149 886 1009 260 127 811 578 533 805 325 977 113 944 651 238 361 991 860 556 64 928 917 455 266 445 604 624 420 340 845 275 370 843 227 226 940 644 909 229 827 898 370 129 808 25 699 293 356 838 135 4 227 890 681 445 418 285 837 27 737 249 366 948 202 438 198 930 648 638 607 73 247 853 136 708 214 476 621 324 103 853 328 596 224 257 646 348 108 927 970 980 520 150 998 477 393 684 559 1 361 692 551 90 75 500 739 636 344 97 852 283 719 33 116 455 866 429 828 826 691 174 746 133 442 94 348 402 420 707 405 942 186 976 376 677 874 703 517 498 499 206 415 366 856 739 420 586 219 952 539 375 23 461 720 355 603 52 999 815 721 574 445 816 1019 105 641 395 972 910 328 607 519 686 246 415 528 170 167 310 940 595 392 221 834 682 835 115 861 335 742 220 247 101 416 222 179 509 175 606 627 674 781 737 746 849 67 457 1012 126 139 625 731 156 697 121 322 449 710 857 291 976 4 701 239 678 172 724 857 583 661 903 797 628 903 835 605 989 615 870 380 710 110 330 101 695 846 918 508 672 594 36 238 244 251 393 767 282 22 430 230 983 401 154 1007 120 678 896 386 390 711 397 347 587 1020 951 79 831 585 200 814 134 560 700 171 452 139 755 314 476 346 388 126 
719 851 198 699 901 18 710 448 351 665 644 326 425 165 571 178 440 665 674 915 866 463 754 136 950 748 47 497 1013 640 930 338 158 525 631 815 887 289 803 116 600 637 410 175 499 876 565 1002 623 577 333 887 586 147 773 776 644 49 77 294 117 494 561 110 979 180 562 72 859 434 1007 286 516 75 597 491 322 888 533 209 43 499 29 411 856 181 305 963 615 778 259 373 877 746 858 381 886 613 91 69 618 523 13 617 226 422 168 929 379 290 923 100 218 307 345 211 789 735 669 585 275 410 921 552 235 636 285 665 659 708 173 724 302 823 1 139 708 903 732 868 442 967 916 163 51 243 871 noqa e231 dtypetorch long devicetorchdevice fmt on return ids def getdummytargetidsself fmt off ids torch tensor 13 6 1 4 12 4 8 10 4 6 3 5 8 7 9 9 noqa e231 dtypetorch long devicetorchdevice fmt on return ids def testinferenceblocksparseself model bigbirdpegasusforconditionalgeneration frompretrained modelid attentiontypeblocksparse blocksize16 numrandomblocks3 model totorchdevice inputids self getdummyinputids targetids self getdummytargetids outputs modelinputids labelstargetids predictionlogits outputs logits self assertequalpredictionlogits shape torch size1 16 96103 fmt off expectedpredictionlogitsslice torch tensor 1 5118 5 5227 4 8125 1 7603 8 1704 3 996 4 8118 6 7806 2 2297 6 9834 3 1906 0 103 7 1515 6 3679 3 1896 6 3054 3 9741 6 3772 5 0042 0 6338 6 7868 0 592 0 5363 1 87 0 331 2 4518 1 8263 3 1899 1 5702 5 8135 4 6675 2 3674 8 9828 3 7913 5 4027 7 6567 1 9007 7 3706 3 8824 0 0247 7 6094 6 6985 3 2826 7 0094 3 8713 5 6555 5 0439 0 3519 7 1525 0 4062 0 2419 2 2194 0 6447 2 9614 2 0713 3 248 1 4527 5 6003 4 5381 2 6382 9 2809 3 2969 5 6811 8 4011 1 6909 7 4937 4 3185 0 0878 7 61 6 6822 3 4753 7 3962 3 5336 4 9216 4 943 0 2043 7 3326 0 2199 0 6016 2 4367 0 7043 3 0689 2 3215 3 0611 1 1084 5 6308 4 4886 2 717 9 4103 3 0733 5 5825 8 4325 1 3075 7 5495 4 4782 0 1092 7 8115 6 6285 3 5311 7 6853 3 509 4 4994 4 9224 0 1384 7 3069 0 0473 0 8578 2 4632 0 5249 3 4627 2 2671 2 8818 noqa e231 devicetorchdevice fmt on self asserttrue torch allclosepredictionlogits0 4 8 128 156 expectedpredictionlogitsslice atol1e4 def testinferencefullattnself model bigbirdpegasusforconditionalgeneration frompretrainedmodelid attentiontypeoriginalfull model totorchdevice inputids self getdummyinputids targetids self getdummytargetids outputs modelinputids labelstargetids predictionlogits outputs logits self assertequalpredictionlogits shape torch size1 16 96103 fmt off expectedpredictionlogitsslice torch tensor 1 3418 5 8304 6 5662 2 0448 8 7702 4 6579 4 9947 6 429 2 4296 7 9431 4 217 0 0672 7 334 5 1966 2 9603 6 0814 4 6756 7 5522 5 076 0 213 6 6638 0 6577 0 244 2 1221 0 7531 2 4076 1 8731 3 5594 1 5525 6 0524 6 309 2 6245 9 229 4 5213 5 0913 7 0622 1 7992 8 0962 4 7994 0 0248 7 7168 5 5878 3 0883 6 5248 4 7895 6 9974 4 8787 0 5445 6 6686 0 0102 0 1659 2 6195 0 7389 2 8956 1 9928 3 3777 1 6407 6 2104 6 0331 2 8076 9 4074 3 9772 5 0574 7 5316 1 4201 8 3035 5 0212 0 1031 7 553 5 5023 3 1427 6 7674 4 4409 6 457 4 525 0 728 6 5422 0 6234 0 4726 2 7486 0 6985 3 0804 1 9669 3 2365 1 5065 6 1271 5 8296 2 8405 9 5649 3 6834 5 1214 7 546 0 9758 8 3335 5 1952 0 1395 7 4348 5 6893 3 2942 7 0356 4 1665 5 9695 4 3898 0 8931 6 3988 0 8957 0 7522 2 8924 0 6498 3 4358 1 8654 2 9735 noqa e231 devicetorchdevice fmt on self asserttrue torch allclosepredictionlogits0 4 8 128 156 expectedpredictionlogitsslice atol1e4 def testseqtoseqgenerationself modelid googlebigbirdpegasuslargearxiv model bigbirdpegasusforconditionalgeneration frompretrainedmodelid totorchdevice 
tokenizer pegasustokenizer frompretrainedmodelid articlelep rthe lep experiments at the resonance of xmath1boson have tested the standard model sm at quantum level measuring the xmath1decay into fermion pairs with an accuracy of one part in ten thousands the good agreement of the lep data with the sm predictions have severely constrained the behavior of new physics at the xmath1pole taking these achievements into account one can imagine that the physics of xmath1boson will again play the central role in the frontier of particle physics if the next generation xmath1 factory comes true with the generated xmath1 events several orders of magnitude higher than that of the lep this factory can be realized in the gigaz option of the international linear collider ilcxcite the ilc is a proposed electron positron collider with tunable energy ranging from xmath12 to xmath13 and polarized beams in its first phase and the gigaz option corresponds to its operation on top of the resonance of xmath1 boson by adding a bypass to its main beam line given the high luminosity xmath14 and the cross section at the resonance of xmath1 boson xmath15 about xmath16 xmath1 events can be generated in an operational year of xmath17 of gigaz which implies that the expected sensitivity to the branching ratio of xmath1decay can be improved from xmath18 at the lep to xmath19 at the gigazxcite in light of this the xmath1boson properties especially its exotic or rare decays which are widely believed to be sensitive to new physics should be investigated comprehensively to evaluate their potential in probing new physics among the rare xmath1decays the flavor changing fc processes were most extensively studied to explore the flavor texture in new physics xcite and it was found that although these processes are severely suppressed in the sm their branching ratios in new physics models can be greatly enhanced to xmath19 for lepton flavor violation decays xcite and xmath20 for quark flavor violation decays xcite besides the fc processes the xmath1decay into light higgs bosons is another type of rare process that was widely studied e g the decay xmath21 xmath22 with the particle xmath0 denoting a light higgs boson was studied in xcite the decay xmath23 was studied in the two higgs doublet model 2hdmxcite and the minimal supersymmetric standard model mssmxcite and the decay xmath4 was studied in a model independent way xcite in 2hdmxcite and also in mssmxcite these studies indicate that in contrast with the kinematic forbidden of these decays in the sm the rates of these decays can be as large as xmath18 in new physics models which lie within the expected sensitivity of the gigaz in this work we extend the previous studies of these decays to some new models and investigate these decays altogether we are motivated by some recent studies on the singlet extension of the mssm such as the next to minimal supersymmetric standard model nmssm xcite and the nearly minimal supersymmetric standard model nmssm xcite where a light cp odd higgs boson xmath0 with singlet dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry like xmath24 or peccei quuin symmetry xcite these non minimal supersymmetric models can not only avoid the xmath25problem but also alleviate the little hierarchy by having such a light higgs boson xmath0 xcite we are also motivated by that with the latest experiments the properties of the light higgs boson are more stringently constrained than before so it is worth updating the 
previous studies so far there is no model independent lower bound on the lightest higgs boson mass in the sm it must be heavier than xmath26 gev obtained from the null observation of the higgs boson at lep experiments however due to the more complex structure of the higgs sector in the extensions of the sm this lower bound can be significantly relaxed according to recent studies e g for the cp odd higgs boson xmath0 we have xmath27 gev in the nmssm xcite xmath28 gev in the nmssm xcite and xmath29 gev in the lepton specific 2hdm l2hdm xcite with such a light cp odd higgs boson the z decay into one or more xmath0 is open up noting that the decay xmath30 is forbidden due to bose symmetry we in this work study the rare xmath1decays xmath6 xmath22 xmath31 and xmath4 in a comparative way for four models namely the type ii 2hdmxcite the l2hdm xcite the nmssm and the nmssm in our study we examine carefully the constraints on the light xmath0 from many latest experimental results this work is organized as follows in sec ii we briefly describe the four new physics models in sec iii we present the calculations of the rare xmath1decays in sec iv we list the constraints on the four new physics models in sec v we show the numerical results for the branching ratios of the rare xmath1decays in various models finally the conclusion is given in sec as the most economical way the sm utilizes one higgs doublet to break the electroweak symmetry as a result the sm predicts only one physical higgs boson with its properties totally determined by two free parameters in new physics models the higgs sector is usually extended by adding higgs doublets andor singlets and consequently more physical higgs bosons are predicted along with more free parameters involved in the general 2hdm contains two xmath32 doublet higgs fields xmath33 and xmath34 and with the assumption of cp conserving its scalar potential can be parameterized asxcite xmath35 endaligned where xmath36 xmath37 are free dimensionless parameters and xmath38 xmath39 are the parameters with mass dimension after the electroweak symmetry breaking the spectrum of this higgs sector includes three massless goldstone modes which become the longitudinal modes of xmath40 and xmath1 bosons and five massive physical states two cp even higgs bosons xmath41 and xmath42 one neutral cp odd higgs particle xmath0 and a pair of charged higgs bosons xmath43 noting the constraint xmath44 with xmath45 and xmath46 denoting the vacuum expectation values vev of xmath33 and xmath34 respectively we choose xmath47 as the input parameters with xmath48 and xmath49 being the mixing angle that diagonalizes the mass matrix of the cp even higgs fields the difference between the type ii 2hdm and the l2hdm comes from the yukawa coupling of the higgs bosons to quark lepton in the type ii 2hdm one higgs doublet xmath34 generates the masses of up type quarks and the other doublet xmath33 generates the masses of down type quarks and charged leptons while in the l2hdm one higgs doublet xmath33 couples only to leptons and the other doublet xmath34 couples only to quarks so the yukawa interactions of xmath0 to fermions in these two models are given by xcite xmath50 with xmath51 denoting generation index obviously in the type ii 2hdm the xmath52 coupling and the xmath53 coupling can be simultaneously enhanced by xmath54 while in the l2hdm only the xmath53 coupling is enhanced by xmath55 the structures of the nmssm and the nmssm are described by their superpotentials and corresponding soft breaking 
terms which are given by xcite xmath56 where xmath57 is the superpotential of the mssm without the xmath25 term xmath58 and xmath59 are higgs doublet and singlet superfields with xmath60 and xmath61 being their scalar component respectively xmath62 xmath63 xmath64 xmath65 xmath66 and xmath67 are soft breaking parameters and xmath68 and xmath69 are coefficients of the higgs self interactions with the superpotentials and the soft breaking terms one can get the higgs potentials of the nmssm and the nmssm respectively like the 2hdm the higgs bosons with same cp property will mix and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices xmath70 where the fields on the right hands of the equations are component fields of xmath71 xmath72 and xmath61 defined by xmath73 xmath74 and xmath75 are respectively the cp even and cp odd neutral higgs bosons xmath76 and xmath77 are goldstone bosons eaten by xmath1 and xmath78 and xmath79 is the charged higgs boson so both the nmssm and nmssm predict three cp even higgs bosons two cp odd higgs bosons and one pair of charged higgs bosons in general the lighter cp odd higgs xmath0 in these model is the mixture of the singlet field xmath80 and the doublet field combination xmath81 i e xmath82 and its couplings to down type quarks are then proportional to xmath83 so for singlet dominated xmath0 xmath84 is small and the couplings are suppressed as a comparison the interactions of xmath0 with the squarks are given byxcite xmath85 i e the interaction does not vanish when xmath86 approaches zero just like the 2hdm where we use the vevs of the higgs fields as fundamental parameters we choose xmath68 xmath69 xmath87 xmath88 xmath66 and xmath89 as input parameters for the nmssmxcite and xmath68 xmath54 xmath88 xmath65 xmath90 and xmath91 as input parameters for the nmssmxcite about the nmssm and the nmssm three points should be noted the first is for the two models there is no explicit xmath92term and the effective xmath25 parameter xmath93 is generated when the scalar component of xmath59 develops a vev the second is the nmssm is actually same as the nmssm with xmath94xcite because the tadpole terms xmath95 and its soft breaking term xmath96 in the nmssm do not induce any interactions except for the tree level higgs boson masses and the minimization conditions and the last is despite of the similarities the nmssm has its own peculiarity which comes from its neutralino sector in the basis xmath97 its neutralino mass matrix is given by xcite xmath98 where xmath99 and xmath100 are xmath101 and xmath102 gaugino masses respectively xmath103 xmath104 xmath105 and xmath106 after diagonalizing this matrix one can get the mass eigenstate of the lightest neutralino xmath107 with mass taking the following form xcite xmath108 this expression implies that xmath107 must be lighter than about xmath109 gev for xmath110 from lower bound on chargnio mass and xmath111 perturbativity bound like the other supersymmetric models xmath107 as the lightest sparticle acts as the dark matter in the universe but due to its singlino dominated nature it is difficult to annihilate sufficiently to get the correct density in the current universe so the relic density of xmath107 plays a crucial way in selecting the model parameters for example as shown in xcite for xmath112 there is no way to get the correct relic density and for the other cases xmath107 mainly annihilates by exchanging xmath1 boson for xmath113 or by exchanging a light cp odd higgs boson xmath0 with mass 
satisfying the relation xmath114 for xmath115 for the annihilation xmath54 and xmath25 are required to be less than 10 and xmath116 respectively because through eq mass exp a large xmath87 or xmath25 will suppress xmath117 to make the annihilation more difficult the properties of the lightest cp odd higgs boson xmath0 such as its mass and couplings are also limited tightly since xmath0 plays an important role in xmath107 annihilation the phenomenology of the nmssm is also rather special and this was discussed in detail in xcite in the type ii 2hdm l2hdm nmssm and nmssm the rare xmath1decays xmath118 xmath22 xmath3 and xmath4 may proceed by the feynman diagrams shown in fig fig1 fig fig2 and fig fig3 respectively for these diagrams the intermediate state xmath119 represents all possible cp even higgs bosons in the corresponding model i e xmath41 and xmath42 in type ii 2hdm and l2hdm and xmath41 xmath42 and xmath120 in nmssm and nmssm in order to take into account the possible resonance effects of xmath119 in fig fig1c for xmath2 and fig fig3 a for xmath11 we have calculated all the decay modes of xmath119 and properly included the width effect in its propagator as to the decay xmath121 two points should be noted one is unlike the decays xmath6 and xmath11 this process proceeds only through loops mediated by quarks leptons in the type ii 2hdm and l2hdm and additionally by sparticles in the nmssm and nmssm so in most cases its rate should be much smaller than the other two the other is due to cp invariance loops mediated by squarks sleptons give no contribution to the decayxcite in actual calculation this is reflected by the fact that the coupling coefficient of xmath122 differs from that of xmath123 by a minus sign see eq asqsq and as a result the squark mediated contributions to xmath121 are completely canceled out with regard to the rare decay xmath11 we have more explanations in the lowest order this decay proceeds by the diagram shown in fig fig3 a and hence one may think that as a rough estimate it is enough to only consider the contributions from fig fig3a however we note that in some cases of the type ii 2hdm and l2hdm due to the cancelation of the contributions from different xmath119 in fig fig3 a and also due to the potentially largeness of xmath124 couplings i e larger than the electroweak scale xmath125 the radiative correction from the higgs mediated loops may dominate over the tree level contribution even when the tree level prediction of the rate xmath126 exceeds xmath20 on the other hand we find the contribution from quark lepton mediated loops can be safely neglected if xmath127 in the type ii 2hdm and the l2hdm in the nmssm and the nmssm besides the corrections from the higgs and quark lepton mediated loops loops involving sparticles such as squarks charginos and neutralinos can also contribute to the decay we numerically checked that the contributions from squarks and charginos can be safely neglected if xmath127 we also calculated part of potentially large neutralino correction note that there are totally about xmath128 diagrams for such correction and found they can be neglected too since considering all the radiative corrections will make our numerical calculation rather slow we only include the most important correction namely that from higgs mediated loops in presenting our results for the four models one can intuitively understand the relative smallness of the sparticle contribution to xmath11 as follows first consider the squark contribution which is induced by the 
xmath129 interaction xmath130 denotes the squark in chirality state and the xmath131 interaction through box diagrams because the xmath132 interaction conserves the chirality of the squarks while the xmath133 interaction violates the chirality to get non zero contribution to xmath11 from the squark loops at least four chiral flippings are needed with three of them provided by xmath131 interaction and the rest provided by the left right squark mixing this means that if one calculates the amplitude in the chirality basis with the mass insertion method the amplitude is suppressed by the mixing factor xmath134 with xmath135 being the off diagonal element in squark mass matrix next consider the chargino neutralino contributions since for a light xmath0 its doublet component parameterized by xmath84 in eq mixing is usually small the couplings of xmath0 with the sparticles will never be tremendously largexcite so the chargino neutralino contributions are not important too in our calculation of the decays we work in the mass eigenstates of sparticles instead of in the chirality basis for the type ii 2hdm and the l2hdm we consider the following constraints xcite theoretical constraints on xmath136 from perturbativity unitarity and requirements that the scalar potential is finit at large field values and contains no flat directions xcite which imply that xmath137 the constraints from the lep search for neutral higgs bosons we compute the signals from the higgs strahlung production xmath138 xmath139 with xmath140 xcite and from the associated production xmath141 with xmath142 xcite and compare them with the corresponding lep data which have been inputted into our code we also consider the constraints from xmath138 by looking for a peak of xmath143 recoil mass distribution of xmath1boson xcite and the constraint of xmath144 mev when xmath145 xcite these constraints limit the quantities such as xmath146 times br hi to barb b on the xmath147 plane with the the subscript xmath148 denoting the coupling coefficient of the xmath149 interaction they also impose a model dependent lower bound on xmath150 e g xmath151 for the type ii 2hdm from our scan results xmath152 for the l2hdmxcite and xmath153 for the nmssm xcite these bounds are significantly lower than that of the sm i e xmath154 partially because in new physics models unconventional decay modes of xmath155 such as xmath156 are open up as to the nmssm another specific reason for allowing a significantly lighter cp even higgs boson is that the boson may be singlet dominated in this model with regard to the lightest cp odd higgs boson xmath0 we checked that there is no lower bound on its mass so long as the xmath157 interaction is weak or xmath155 is sufficiently heavy the constraints from the lep search for a light higgs boson via the yukawa process xmath158 with xmath22 and xmath61 denoting a scalar xcite these constraints can limit the xmath159 coupling versus xmath160 in new physics models the constraints from the cleo iii limit on xmath161 and the latest babar limits on xmath162 these constraints will put very tight constraints on the xmath163 coupling for xmath164 in our analysis we use the results of fig 8 in the second paper of xcite to excluded the unfavored points the constraints from xmath165 couplings since the higgs sector can give sizable higher order corrections to xmath165 couplings we calculate them to one loop level and require the corrected xmath165 couplings to lie within the xmath166 range of their fitted value the sm predictions for 
the couplings at xmath1pole are given by xmath167 and xmath168 xcite and the fitted values are given by xmath169 and xmath170 respectivelyxcite we adopt the formula in xcite to the 2hdm in our calculation the constraints from xmath171 leptonic decay we require the new physics correction to the branching ratio xmath172 to be in the range of xmath173 xcite we use the formula in xcite in our calculation about the constraints 5 and 6 two points should be noted one is all higgs bosons are involved in the constraints by entering the self energy of xmath171 lepton the xmath174 vertex correction or the xmath175 vertex correction and also the box diagrams for xmath176xcite since the yukawa couplings of the higgs bosons to xmath171 lepton get enhanced by xmath54 and so do the corrections xmath54 must be upper bounded for given spectrum of the higgs sector generally speaking the lighter xmath0 is the more tightly xmath54 is limitedxcite the other point is in the type ii 2hdm xmath177 b physics observables as well as xmath178 decays discussed above can constraint the model in a tighter way than the constraints 5 and 6 since the yukawa couplings of xmath171 lepton and xmath179 quark are simultaneously enhanced by xmath54 but for the l2hdm because only the yukawa couplings of xmath171 lepton get enhanced see eq yukawa the constraints 5 and 6 are more important in limiting xmath54 indirect constraints from the precision electroweak observables such as xmath180 xmath181 and xmath182 or their combinations xmath183 xcite we require xmath184 to be compatible with the lep sld data at xmath185 confidence levelxcite we also require new physics prediction of xmath186 is within the xmath187 range of its experimental value the latest results for xmath188 are xmath189 measured value and xmath190 sm prediction for xmath191 gev xcite in our code we adopt the formula for these observables presented in xcite to the type ii 2hdm and the l2hdm respectively in calculating xmath180 xmath181 and xmath182 we note that these observables get dominant contributions from the self energies of the gauge bosons xmath1 xmath192 and xmath193 since there is no xmath194 coupling or xmath195 coupling xmath0 must be associated with the other higgs bosons to contribute to the self energies so by the uv convergence of these quantities one can infer that for the case of a light xmath0 and xmath196 these quantities depend on the spectrum of the higgs sector in a way like xmath197 at leading order which implies that a light xmath0 can still survive the constraints from the precision electroweak observables given the splitting between xmath150 and xmath198 is moderatexcite the constraints from b physics observables such as the branching ratios for xmath199 xmath200 and xmath201 and the mass differences xmath202 and xmath203 we require their theoretical predications to agree with the corresponding experimental values at xmath187 level in the type ii 2hdm and the l2hdm only the charged higgs boson contributes to these observables by loops so one can expect that xmath198 versus xmath54 is to be limited combined analysis of the limits in the type ii 2hdm has been done by the ckmfitter group and the lower bound of xmath204 as a function of xmath87 was given in fig 11 of xcite this analysis indicates that xmath198 must be heavier than xmath205 at xmath185 c l regardless the value of xmath54 in this work we use the results of fig 11 in xcite to exclude the unfavored points as for the l2hdm b physics actually can not put any constraintsxcite because 
in this model the couplings of the charged higgs boson to quarks are proportional to xmath206 and in the case of large xmath54 which we are interested in they are suppressed in our analysis of the l2hdm we impose the lep bound on xmath198 i e xmath207xcite the constraints from the muon anomalous magnetic moment xmath208 now both the theoretical prediction and the experimental measured value of xmath208 have reached a remarkable precision but a significant deviation still exists xmath209 xcite in the 2hdm xmath208 gets additional contributions from the one loop diagrams induced by the higgs bosons and also from the two loop barr zee diagrams mediated by xmath0 and xmath155xcite if the higgs bosons are much heavier than xmath25 lepton mass the contributions from the barr zee diagrams are more important and to efficiently alleviate the discrepancy of xmath208 one needs a light xmath0 along with its enhanced couplings to xmath25 lepton and also to heavy fermions such as bottom quark and xmath171 lepton to push up the effects of the barr zee diagramxcite the cp even higgs bosons are usually preferred to be heavy since their contributions to xmath208 are negative in the type ii 2hdm because xmath54 is tightly constrained by the process xmath210 at the lepxcite and the xmath178 decayxcite the barr zee diagram contribution is insufficient to enhance xmath208 to xmath187 range around its measured valuexcite so in our analysis we require the type ii 2hdm to explain xmath208 at xmath211 level while for the l2hdm xmath54 is less constrained compared with the type ii 2hdm and the barr zee diagram involving the xmath171loop is capable to push up greatly the theoretical prediction of xmath208xcite therefore we require the l2hdm to explain the discrepancy at xmath187 level unlike the other constraints discussed above the xmath208 constraint will put a two sided bound on xmath54 since on the one hand it needs a large xmath54 to enhance the barr zee contribution but on the other hand too large xmath54 will result in an unacceptable large xmath208 since this paper concentrates on a light xmath0 the decay xmath212 is open up with a possible large decay width we require the width of any higgs boson to be smaller than its mass to avoid a too fat higgs bosonxcite we checked that for the scenario characterized by xmath213 the coefficient of xmath214 interaction is usually larger than the electroweak scale xmath125 and consequently a large decay width is resulted for the nmssm and nmssm the above constraints become more complicated because in these models not only more higgs bosons are involved in but also sparticles enter the constraints so it is not easy to understand some of the constraints intuitively take the process xmath199 as an example in the supersymmetric models besides the charged higgs contribution chargino loops gluino loops as well as neutralino loops also contribute to the processxcite and depending on the susy parameters any of these contributions may become dominated over or be canceled by other contributions as a result although the charged higgs affects the process in the same way as that in the type ii 2hdm charged higgs as light as xmath215 is still allowed even for xmath216xcite since among the constraints xmath208 is rather peculiar in that it needs new physics to explain the discrepancy between xmath217 and xmath218 we discuss more about its dependence on susy parameters in the nmssm and the nmssm xmath208 receives contributions from higgs loops and neutralino chargino loops for the higgs 
contribution it is quite similar to that of the type ii 2hdm except that more higgs bosons are involved inxcite for the neutralino chargino contribution in the light bino limit i e xmath219 it can be approximated byxcite xmath220 for xmath221 with xmath222 being smuon mass so combining the two contributions together one can learn that a light xmath0 along with large xmath54 andor light smuon with moderate xmath87 are favored to dilute the discrepancy because more parameters are involved in the constraints on the supersymmetric models we consider following additional constraints to further limit their parameters direct bounds on sparticle masses from the lep1 the lep2 and the tevatron experiments xcite the lep1 bound on invisible z decay xmath223 the lep2 bound on neutralino production xmath224 and xmath225xcite dark matter constraints from the wmap relic density 0 0975 xmath226 0 1213 xcite note that among the above constraints the constraint 2 on higgs sector and the constraint c on neutralino sector are very important this is because in the supersymmetric models the sm like higgs is upper bounded by about xmath227 at tree level and by about xmath228 at loop level and that the relic density restricts the lsp annihilation cross section in a certain narrow range in our analysis of the nmssm we calculate the constraints 3 and 5 7 by ourselves and utilize the code nmssmtools xcite to implement the rest constraints we also extend nmssmtools to the nmssm to implement the constraints for the extension the most difficult thing we faced is how to adapt the code micromegasxcite to the nmssm case we solve this problem by noting the following facts as we mentioned before the nmssm is actually same as the nmssm with the trilinear singlet term setting to zero so we can utilize the model file of the nmssm as the input of the micromegas and set xmath229 since in the nmssm the lsp is too light to annihilate into higgs pairs there is no need to reconstruct the effective higgs potential to calculate precisely the annihilation channel xmath230 with xmath61 denoting any of higgs bosonsxcite we thank the s of the nmssmtools for helpful discussion on this issue when we finish such extensionxcite with the above constraints we perform four independent random scans over the parameter space of the type ii 2hdm the l2hdm the nmssm and the nmssm respectively we vary the parameters in following ranges xmath231 for the type ii 2hdm xmath232 for the l2hdm xmath233 for the nmssm and xmath234 for the nmssm in performing the scans we note that for the nmssm and the nmssm some constraints also rely on the gaugino masses and the soft breaking parameters in the squark sector and the slepton sector since these parameters affect little on the properties of xmath0 we fix them to reduce the number of free parameters in our scan for the squark sector we adopt the xmath235 scenario which assumes that the soft mass parameters for the third generation squarks are degenerate xmath236 800 gev and that the trilinear couplings of the third generation squarks are also degenerate xmath237 with xmath238 for the slepton sector we assume all the soft breaking masses and trilinear parameters to be 100 gev this setting is necessary for the nmssm since this model is difficult to explain the muon anomalous moment at xmath239 level for heavy sleptonsxcite finally we assume the grand unification relation xmath240 for the gaugino masses with xmath241 being fine structure constants of the different gauge group with large number of random points in 
the scans we finally get about xmath242 xmath243 xmath244 and xmath242 samples for the type ii 2hdm the l2hdm the nmssm and the nmssm respectively which survive the constraints and satisfy xmath245 analyzing the properties of the xmath0 indicates that for most of the surviving points in the nmssm and the nmssm its dominant component is the singlet field numerically speaking xmath246 so that its couplings to the sm fermions are suppressedxcite our analysis also indicates that the main decay products of xmath0 are xmath247 for the l2hdmxcite xmath248 dominant and xmath247 subdominant for the type ii 2hdm the nmssm and the nmssm and in some rare cases neutralino pairs in the nmssmxcite in fig fig4 we project the surviving samples on the xmath249 plane this figure shows that the allowed range of xmath54 is from xmath250 to xmath251 in the type ii 2hdm and from xmath252 to xmath253 in the l2hdm just as we introduced before the lower bounds of xmath254 come from the fact that we require the models to explain the muon anomalous moment while the upper bound is due to we have imposed the constraint from the lep process xmath255 which have limited the upper reach of the xmath256 coupling for light xmath61 xcitefor the dependence of xmath256 coupling on xmath54 see sec this figure also indicates that for the nmssm and the nmssm xmath54 is upper bounded by xmath257 for the nmssm this is because large xmath87 can suppress the dark matter mass to make its annihilation difficult see xcite and also sec ii but for the nmssm this is because we choose a light slepton mass so that large xmath54 can enhance xmath208 too significantly to be experimentally unacceptable we checked that for the slepton mass as heavy as xmath258 xmath259 is still allowed for the nmssm in fig fig5 and fig fig6 we show the branching ratios of xmath260 and xmath261 respectively fig fig5 indicates among the four models the type ii 2hdm predicts the largest ratio for xmath260 with its value varying from xmath262 to xmath263 the underlying reason is in the type ii 2hdm the xmath264 coupling is enhanced by xmath54 see fig fig4 while in the other three model the coupling is suppressed either by xmath265 or by the singlet component of the xmath0 fig fig6 shows that the l2hdm predicts the largest rate for xmath266 with its value reaching xmath5 in optimum case and for the other three models the ratio of xmath261 is at least about one order smaller than that of xmath267 this feature can be easily understood from the xmath268 coupling introduced in sect we emphasize that if the nature prefers a light xmath0 xmath260 andor xmath269 in the type ii 2hdm and the l2hdm will be observable at the gigaz then by the rates of the two decays one can determine whether the type ii 2hdm or the l2hdm is the right theory on the other hand if both decays are observed with small rates or fail to be observed the singlet extensions of the mssm are favored in fig fig7 we show the rate of xmath3 as the function of xmath270 this figure indicates that the branching ratio of xmath121 can reach xmath271 xmath272 xmath273 and xmath274 for the optimal cases of the type ii 2hdm the l2hdm the nmssm and the nmssm respectively which implies that the decay xmath121 will never be observable at the gigaz if the studied model is chosen by nature the reason for the smallness is as we pointed out before that the decay xmath121 proceeds only at loop level comparing the optimum cases of the type ii 2hdm the nmssm and the nmssm shown in fig 5 7 one may find that the relation xmath275 
holds for any of the decays. This is because the decays are all induced by the Yukawa couplings, which have a similar structure in these models. In the supersymmetric models, the large singlet component of the light xmath0 acts to suppress the Yukawa couplings, and the xmath0 in the nMSSM has a larger singlet component than that in the NMSSM.

Next we consider the decay xmath11, which, unlike the decays above, depends on the Higgs self-interactions. In Fig. [fig8] we plot its rate as a function of xmath270. This figure indicates that xmath276 may be the largest among the ratios of the exotic xmath1 decays, reaching xmath277 in the optimum cases of the type-II 2HDM, the L2HDM and the NMSSM. The underlying reason is that in some cases the intermediate state xmath119 in Fig. [fig3](a) may be on shell. In fact, we find this is one of the main differences between the NMSSM and the nMSSM: in the NMSSM, xmath119 in Fig. [fig3](a) may be on shell (corresponding to the points with large xmath278), while in the nMSSM this seems impossible. So we conclude that the decay xmath11 may serve as an alternative channel for testing new physics models; in particular, it may be used to distinguish the NMSSM from the nMSSM if supersymmetry is found at the LHC and xmath11 is observed at the GigaZ with a large rate.

Before we end our discussion, we note that in the nMSSM the Higgs boson xmath0 may be lighter than xmath279 without conflicting with the low-energy data from xmath178 decays and the other observables (see Figs. [fig4]-[fig8]). In this case xmath0 is axion-like, as pointed out in xcite. We checked that, among the rare xmath1 decays discussed in this paper, the largest branching ratio comes from xmath280, which can reach xmath281. Since in this case the decay product of xmath0 is a highly collinear muon pair, detecting the decay xmath280 may require some knowledge about the detectors, which is beyond the scope of this paper.

In summary, we studied the rare xmath1 decays xmath2 (xmath7), xmath282 and xmath4 in the type-II 2HDM, the lepton-specific 2HDM, the NMSSM and the nMSSM, which all predict a light CP-odd Higgs boson xmath0. In the parameter space allowed by current experiments, the branching ratio can be as large as xmath5 for xmath118, xmath8 for xmath3 and xmath9 for xmath4, which implies that the decays xmath2 and xmath283 may be accessible at the GigaZ option. Since the models predict different sizes of branching ratios, these decays can be used to distinguish between the models through the measurement of these rare decays.

This work was supported in part by HASTIT under grant No. 2009HASTIT004, by the National Natural Science Foundation of China (NNSFC) under grant Nos. 10821504, 10725526, 10635030, 10775039 and 11075045, and by the Project of Knowledge Innovation Program (PKIP) of the Chinese Academy of Sciences under grant No.

References: for some reviews, see e.g. M. A. Perez, G. Tavares-Velasco and J. J. Toscano, Int. J. Mod. Phys. A 19, 159 (2004); J. M. Yang, arXiv:1006.2594; J. I. Illana and M. Masip, 67, 035004 (2003); J. Cao, Z. Xiong and J. M. Yang, 32, 245 (2004); D. Atwood et al., 66, 093005 (2002); J. Kalinowski and S. Pokorski, 219, 116 (1989); A. Djouadi, P. M. Zerwas and J. Zunft, 259, 175 (1991); A. Djouadi, J. Kalinowski and P. M. Zerwas, Z. Phys. C 54, 255 (1992); M. Krawczyk et al., 19, 463 (2001); 8, 495 (1999); J. F. Gunion, G. Gamberini and S. F. Novaes, 38, 3481 (1988); Thomas J. Weiler and Tzu-Chiang Yuan, 318, 337 (1989); A. Djouadi et al., 1, 163 (1998), hep-ph/9701342; D. Chang and W. Y. Keung, Phys. Lett. 77, 3732 (1996); E. Keith and E. Ma, 57, 2017 (1998); M. A. Perez, G. Tavares-Velasco and J. J. Toscano, Int. J. Mod. Phys. A 19, 159 (2004); F. Larios, G. Tavares-Velasco and C. P. Yuan, 64, 055004 (2001); 66, 075006 (2002); A. Djouadi et al., 10, 27 (1999), hep-ph/9903229; for a detailed introduction to the NMSSM, see F. Franke and H. Fraas, Int. J. Mod. Phys. A 12, 479 (1997); for a recent review of the NMSSM, see for example U. Ellwanger, C. Hugonie and A. M. Teixeira, arXiv:0910.1785; see e.g. J. R. Ellis, J. F. Gunion, H. E. Haber, L. Roszkowski and F. Zwirner, Phys. Rev. D 39, 844 (1989); M. Drees, Int. J. Mod. Phys. A 4, 3635 (1989); U. Ellwanger, M. Rausch de Traubenberg and C. A. Savoy, Phys. Lett. B 315, 331 (1993); Nucl. Phys. B 492, 21 (1997); D. J. Miller, R. Nevzorov and P. M. Zerwas, 681, 3 (2004); C. Panagiotakopoulos and K. Tamvakis, 446, 224 (1999); 469, 145 (1999); C. Panagiotakopoulos and A. Pilaftsis, 63, 055003 (2001); A. Dedes et al., 63, 055009 (2001); A. Menon et al., 70, 035005 (2004); V. Barger et al., 630, 85 (2005); C. Balazs et al., 0706, 066 (2007); B. A. Dobrescu and K. T. Matchev, 0009, 031 (2000); A. Arhrib, K. Cheung, T. J. Hou and K. W. Song, hep-ph/0611211; 0703, 073 (2007); X. G. He, J. Tandean and G. Valencia, 98, 081802 (2007); 0806, 002 (2008); F. Domingo et al., 0901, 061 (2009); Gudrun Hiller, 70, 034018 (2004); R. Dermisek and John F. Gunion, 75, 075019 (2007); 79, 055014 (2009); 81, 055001 (2010); R. Dermisek, John F. Gunion and B. McElrath, 76, 051105 (2007); Z. Heng et al., 77, 095012 (2008); A. Belyaev et al., 81, 075021 (2010); D. Das and U. Ellwanger, arXiv:1007.1151 [hep-ph]; S. Andreas, O. Lebedev, S. Ramos-Sanchez and A. Ringwald, arXiv:1005.3978 [hep-ph]; J. F. Gunion, JHEP 0908, 032 (2009); R. Dermisek and J. F. Gunion, Phys. Rev. D 81, 075003 (2010); R. Dermisek and J. F. Gunion, Phys. Lett. 95, 041801 (2005); Phys. Rev. D 73, 111701 (2006); J. Cao, H. E. Logan and J. M. Yang, 79, 091701 (2009); J. Cao, P. Wan, L. Wu and J. M. Yang, 80, 071701 (2009); J. F. Gunion and H. E. Haber, 67, 075019 (2003); R. M. Barnett et al., Phys. Lett. B 136, 191 (1984); R. M. Barnett, G. Senjanovic and D. Wyler, Phys. Rev. D 30, 1529 (1984); Y. Grossman, Nucl. Phys. B 426, 355 (1994); H. S. Goh, L. J. Hall and P. Kumar, JHEP 0905, 097 (2009); A. G. Akeroyd and W. J. Stirling, Nucl. Phys. B 447, 3 (1995); A. G. Akeroyd, Phys. Lett. B 377, 95 (1996); H. E. Logan and D. MacLennan, Phys. Rev. D 79, 115022 (2009); M. Aoki et al., arXiv:0902.4665 [hep-ph]; V. Barger, P. Langacker, H. S. Lee and G. Shaughnessy, Phys. Rev. D 73, 115010 (2006); S. Hesselbach et al., arXiv:0810.0511v2 [hep-ph]; J. de Vivie and P. Janot (ALEPH Collaboration), PA13-027, contribution to the International Conference on High Energy Physics, Warsaw, Poland, 25-31 July 1996; J. Kurowska, O. Grajek and P. Zalewski (DELPHI Collaboration), CERN-OPEN-99-385; ALEPH, DELPHI and L3 Collaborations, Phys. Rept. 427, 257 (2006); J. Cao and J. M. Yang, JHEP 0812, 006 (2008); M. Krawczyk and D. Temes, Eur. Phys. J. C 44, 435 (2005); G. Altarelli and R. Barbieri, 253, 161 (1991); M. E. Peskin and T. Takeuchi, 46, 381 (1992); C. Amsler et al. (Particle Data Group), 667, 1 (2008); O. Deschamps, S. Descotes-Genon, S. Monteil, V. Niess, S. T'Jampens and V. Tisserand, arXiv:0907.5135 [hep-ph]; S. Su and B. Thomas, Phys. Rev. D 79, 095014 (2009); G. Abbiendi et al., Eur. Phys. J. C 32, 453 (2004); M. Davier et al., 66, 1 (2010); K. Cheung et al., Phys. Rev. D 64, 111301 (2001); K. Cheung and O. C. W. Kong, Phys. Rev. D 68, 053003 (2003); T. Besmer, C. Greub and T. Hurth, 609, 359 (2001); F. Borzumati et al., 62, 075005 (2000); J. Cao, K. I. Hikasa, W. Wang, J. M. Yang and L. X. Yu, Phys. Rev. D 82, 051701 (2010), arXiv:1006.4811 [hep-ph]; J. F. Gunion et al., 73, 015011 (2006); Martin and J. D. Wells, Phys. Rev. D 64, 035003 (2001); J. Abdallah et al., Eur. Phys. J. C 31, 421 (2004); G. Abbiendi et al., Eur. Phys. J. C 35, 1 (2004); J. Dunkley et al. (WMAP Collaboration), Astrophys. J. Suppl. 180, 306 (2009), arXiv:0803.0586 [astro-ph]; U. Ellwanger et al., 02, 066 (2005); G. Belanger, F. Boudjema, A. Pukhov and A. Semenov, Comput. Phys. Commun. 174, 577 (2006); Comput. Phys. Commun. 176, 367 (2007); G. Belanger, F. Boudjema, C. Hugonie, A. Pukhov and A. Semenov, JCAP 0509, 001 (2005).

ARTICLE_MAGNET: It is well known that the classical magnetoresistance (MR) in metals or semiconductors with a closed free-electron Fermi surface increases quadratically with increasing magnetic field xmath2 for xmath3, and saturates when xmath4. Here xmath5 is the zero-magnetic-field mobility.
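For orientation, that classical statement can be written out explicitly. The symbols below are my own stand-ins for the xmath placeholders (a hedged sketch of the textbook result, not the paper's notation):

\[
\frac{\Delta\rho_{xx}(B)}{\rho_{xx}(0)} \;\propto\; (\mu B)^{2} \quad \text{for } \mu B \ll 1,
\qquad
\rho_{xx}(B) \;\to\; \text{const.} \quad \text{for } \mu B \gg 1,
\]

where \(\mu\) is the zero-field mobility, so any MR that keeps growing linearly in \(B\) violates this expectation.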
Hence the extraordinarily high and linear MR (LMR), which breaks this familiar rule, has been attracting much attention ever since its discovery in the past decade. This unexpected LMR has been reported in silver chalcogenides xcite, indium antimonide xcite, silicon xcite, MnAs-GaAs composite material xcite, and graphene xcite. Kapitza's linear law xcite states that a metal shows a magnetoresistance linear in a perpendicular magnetic field when it has an open Fermi surface and a mean free path longer than the electronic Larmor radius. Recently, two other models, not requiring an open Fermi surface, have been constructed to provide possible mechanisms for the LMR phenomenon. Abrikosov suggested a quantum-limit origin of LMR for a homogeneous system with a gapless linear energy spectrum xcite; his model requires that the Landau levels be well formed and the carrier concentration be so small that all electrons occupy only the lowest Landau band. Alternatively, Parish and Littlewood developed a classical model without involving a linear spectrum xcite; ignoring the concrete microscopic mechanism, they attributed this unusual MR to mobility fluctuations in a strongly inhomogeneous system.

Topological insulators (TIs) xcite are novel materials with a full energy gap in the bulk while hosting gapless surface states. Due to their unique band structure, with only one helical Dirac cone and linear energy dispersion xcite, the surface states of the TI Bixmath0Sexmath1 have become an excellent platform for the study of quantum-limit LMR. The recent experiment on this flat surface system, however, reported that a large positive MR, which becomes very linear above a characteristic field of xmath6xmath7xmath8 T, was observed even in the opposite situation, where the carrier sheet density is so high that the electrons occupy more than one Landau level xcite. Moreover, it was found that raising the temperature to room temperature has almost no influence on the observed LMR. It is striking that this observation conflicts both with Abrikosov's model and with the classical Parish-Littlewood model. So far, a reliable theoretical scheme capable of explaining this novel experiment has been lacking.

In this paper, we generalize the balance-equation approach xcite to a system modeling the surface states of a three-dimensional TI, in order to investigate the two-dimensional magnetotransport in it. We find that a positive, nonsaturating and dominantly linear magnetoresistance can appear within a quite wide magnetic-field range in a TI surface state having a positive and finite effective g-factor. This linear magnetoresistance shows up in a system of high carrier concentration and low mobility, when the electrons are in extended states and spread over many smeared Landau levels, and it persists up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator Bixmath0Sexmath1 nanoribbons xcite.

We consider the surface state of a Bixmath0Sexmath1-type large-bulk-gap TI in the xmath9-xmath10 plane, under the influence of a uniform magnetic field xmath11 applied along the xmath12 direction xcite. Following the experimental observation xcite, we assume that the Fermi energy lies in the gap of the bulk band and above the Dirac point, i.e. the surface carriers are electrons. Further, the separations of the Fermi energy from the bottom of the bulk band and from the Dirac point are much larger than the highest temperature xmath13 considered in this work; hence the contribution from the bulk band to the magnetotransport is negligible. These electrons, scattered by randomly distributed impurities and by phonons, are driven by a uniform in-plane electric field xmath14 in the topological surface.

The Hamiltonian of this many-electron and phonon system consists of an electron part xmath15, a phonon part xmath16, and electron-impurity and electron-phonon interactions xmath17 and xmath18: xmath19. Here the electron Hamiltonian is taken in the form xmath20, in which xmath21, xmath22, xmath23 and xmath24 stand, respectively, for the canonical momentum, coordinate, momentum and spin operators of the xmath25-th electron having charge xmath26; xmath27 is the vector potential of the perpendicular magnetic field xmath28 in the Landau gauge; xmath29 is the Fermi velocity; xmath30 is the effective g-factor of the surface electron; and xmath31 is the Bohr magneton, with xmath32 the free-electron mass. The sum index xmath25 in Eq. [helectron] runs over all electrons of total number xmath33 in the surface state of unit area.

In the framework of the balance-equation approach xcite, the two-dimensional center-of-mass (c.m.) momentum and coordinate, xmath34 and xmath35, and the relative-electron momenta and coordinates, xmath36 and xmath37, are introduced to write the Hamiltonian xmath15 as the sum of a single-particle c.m. part xmath38 and a many-particle relative-electron part xmath39: xmath40, with xmath41. In this, xmath42 is the canonical momentum of the center of mass and xmath43 is the canonical momentum of the xmath25-th relative electron. Here we have also introduced the c.m. spin operators xmath44 and xmath45. The commutation relations between the c.m. spin operators xmath46 and xmath47 and the spin operators xmath48, xmath49 and xmath50 of the xmath25-th electron are of order xmath51 (reconstructing the garbled expression): \([\Sigma^{\beta_1},\sigma_j^{\beta_2}] = N^{-1}\,2\mathrm{i}\,\varepsilon_{\beta_1\beta_2\beta_3}\,\sigma_j^{\beta_3}\), with xmath53. Therefore, for a macroscopically large-xmath33 system, the c.m. part xmath38 actually commutes with the relative-electron part xmath54 in the Hamiltonian, i.e. the c.m. motion and the relative motion of the electrons are truly separated from each other. The couplings between the two emerge only through the electron-impurity and electron-phonon interactions. Furthermore, the electric field xmath55 shows up only in xmath38, and in view of \([r_{i\alpha},p_{j\beta}] = \mathrm{i}\delta_{\alpha\beta}\delta_{ij}(1-1/N) \simeq \mathrm{i}\delta_{\alpha\beta}\delta_{ij}\), i.e. the relative-electron momenta and coordinates can be treated as canonical conjugate variables, the relative-motion part xmath54 is just the Hamiltonian of xmath33 electrons in the TI surface state in the magnetic field, without the presence of the electric field.

In terms of the c.m. coordinate xmath57 and the relative-electron density operator xmath58, the electron-impurity and electron-phonon interactions can be written as xcite xmath59. Here xmath60 and xmath61 are, respectively, the impurity potential (an impurity at randomly distributed position xmath62) and the electron-phonon coupling matrix element in the plane-wave representation, and xmath63, with xmath64 and xmath65 being the creation and annihilation operators for a phonon of wavevector xmath66 in branch xmath67 having frequency xmath68.

The velocity operator xmath69 is the time variation of the coordinate xmath70: \(v = v_{\mathrm F}\,(\sigma_{\mathrm c}^{y}\hat{i} - \sigma_{\mathrm c}^{x}\hat{j})\). To derive a force-balance equation for steady-state transport, we consider the Heisenberg equation for the rate of change of the c.m. canonical momentum xmath71: \(\dot{P} = N e\,v\times B + N e\,E + F_{\mathrm i} + F_{\mathrm p}\), in which the frictional forces xmath73 and xmath74 share the same expressions as given in ref.
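Spelled out in the standard balance-equation form (a hedged reconstruction with explicit symbols substituted for the elided placeholders), the steady state of the equation above reads

\[
0 \;=\; N e\,\mathbf{v}\times\mathbf{B} \;+\; N e\,\mathbf{E} \;+\; \mathbf{F}_{\mathrm i}(v) \;+\; \mathbf{F}_{\mathrm p}(v),
\]

and, with the drift velocity \(\mathbf{v}=v\hat{x}\), the transverse component is fixed purely kinematically while all the field dependence of the longitudinal resistivity enters through the frictional forces:

\[
\rho_{xy} \;=\; \frac{B}{N e},
\qquad
\rho_{xx} \;=\; -\,\frac{F_{\mathrm i}(v)+F_{\mathrm p}(v)}{N^{2}e^{2}v}.
\]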
The statistical average of the operator equation can be determined to linear order in the electron-impurity and electron-phonon interactions xmath17 and xmath18, with the initial density matrix xmath75 at temperature xmath76, when the in-plane electric field xmath77 is not strong. For steady transport states we have xmath78, leading to a force-balance equation of the form xmath79. Here xmath80, the statistically averaged velocity of the moving center of mass, is identified as the average rate of change of its position, i.e. the drift velocity of the electron system driven by the electric field xmath77; and xmath81 and xmath82 are the frictional forces experienced by the center of mass due to impurity and phonon scattering: xmath83 [Eq. (fp)]. In these expressions xmath84 is the Bose distribution function, xmath85, and xmath86 stands for the imaginary part of the Fourier spectrum of the relative-electron density correlation function, defined by xmath87, where xmath88 and xmath89 denotes statistical averaging over the initial density matrix xmath90 xcite.

The force-balance equation describes the steady-state two-dimensional magnetotransport in the surface state of a TI. Note that the frictional forces xmath81 and xmath82 point opposite to the drift velocity xmath91, and their magnitudes are functions of xmath92 only. With the drift velocity xmath93 in the xmath9 direction, the force-balance equation yields a transverse resistivity xmath94 and a longitudinal resistivity xmath95; the linear one is of the form xmath96.

For calculating the electron-density correlation function xmath97 we proceed in the Landau representation xcite. The Landau levels of the single-particle Hamiltonian xmath98 of the relative-electron system in the absence of the electric field are composed of a positive branch xmath99 and a negative branch xmath100 xcite, xmath101 with xmath102 and xmath103, and a zero (xmath104) level xmath105. The corresponding Landau wave functions are xmath106 and xmath107 for xmath108, and xmath109 for xmath104. Here xmath110 is the wavevector of the system along the xmath9 direction; xmath111 with xmath112; and xmath113 is the harmonic-oscillator eigenfunction, with xmath114 being the Hermite polynomial, xmath115, and xmath116. Each Landau level contains xmath117 electron states for a system of unit surface area. The positive branch xmath118 and the xmath104 level xmath119 of the above energy spectrum are indeed quite close to those of the surface states in the bulk gap of Bixmath0Sexmath1-family materials derived from microscopic band calculations xcite.

The Landau levels are broadened due to impurity, phonon and electron-electron scattering. We model the imaginary part of the retarded Green's function, or the density of states, of the broadened Landau level xmath120 (written for the ± branches and the xmath104 level) using a Gaussian-type form xcite, xmath121, with a half-width xmath122 of the form xcite xmath123^{1/2}. Here xmath124 is the single-particle lifetime and xmath125 is the cyclotron frequency of the linear-energy-dispersion system, with xmath126 being the zero-temperature Fermi level. Using a semi-empirical parameter xmath127 to relate xmath124 to the transport scattering time xmath128, and expressing xmath129 through the zero-field mobility xmath5 at finite temperature xcite, we can write the Landau-level broadening as xmath130^{1/2}.
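The level structure and broadening just described have a standard form for a Zeeman-split Dirac cone. Written out as a hedged reconstruction of the elided expressions (with \(g\) the effective g-factor):

\[
\varepsilon_{\pm n} \;=\; \pm\sqrt{2n\,\hbar v_{\mathrm F}^{2}eB+\Big(\tfrac{1}{2}g\mu_{B}B\Big)^{2}}\;\;(n=1,2,\dots),
\qquad
\varepsilon_{0} \;=\; -\,\tfrac{1}{2}g\mu_{B}B,
\]

so the zero level moves down (up) for positive (negative) \(g\), consistent with the doping cases discussed below, and each broadened level carries a Gaussian density of states, up to normalization,

\[
\mathrm{Im}\,G_{n}(\varepsilon)\;\propto\;\exp\!\Big[-\,\big(\varepsilon-\varepsilon_{n}\big)^{2}/\,2\Gamma^{2}\Big],
\]

with half-width \(\Gamma\) that shrinks as the zero-field mobility grows.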
In the present study we consider the case of xmath120-doping, i.e. the Fermi level is high enough above the energy zero of the Dirac cone to lie in the range of the ±-branch levels, and the states of the xmath100-branch levels are completely filled, so that they are irrelevant to electron transport. Special attention has to be paid to the xmath104 level, since, depending on the direction of the exchange potential, the effective g-factor of a TI surface state, xmath30, can be positive, zero or negative xcite. The sign and magnitude of the effective g-factor determine how many states of the zero level should be included in, or excluded from, the available states for electron occupation in the case of xmath120-doping at a given magnetic field.

(i) If xmath131, the xmath104 level center sits exactly at xmath132 and the system is electron-hole symmetric. The total number of negative-energy states (the states of the lower half of the xmath104 level plus the states of the xmath100-branch levels) and that of positive-energy states (the states of the upper half of the xmath104 level plus the states of the xmath99-branch levels) do not change when the magnetic field is changed. Therefore the lower-half, negative-energy states of this level are always filled, and the upper-half, positive-energy states are available for the occupation of particles which are counted as electrons participating in transport.

(ii) For a finite positive xmath133, the xmath104 level xmath134 moves downward to negative energy, and at finite magnetic field strength xmath2 its distance to the nearest xmath100-branch level, xmath135, is closer than to the nearest +-branch level. This is equivalent to opening an energy gap, increasingly enlarged with increasing xmath2, between the +-branch states and the states of the zero level and the xmath100-branch levels. The opening of a sufficient energy gap implies that, with increasing magnetic field, the states in the + branch levels no longer shrink into the zero level, and thus the xmath104 level should be completely excluded from the conduction band; i.e. once the magnetic field xmath2 exceeds a certain value (depending on the magnitude of xmath30), only particles occupying the +-branch states are counted as electrons participating in transport.

(iii) For a finite negative xmath136, the xmath104 level xmath134 moves upward to positive energy, and an increasingly enlarged energy gap opens between the states of the zero level plus the + branch and the states of the xmath100-branch levels; particles occupying the xmath104 level and the +-branch states are then the electrons participating in transport once the magnetic field xmath2 exceeds a certain value.

As a result, the experimentally accessible sheet density xmath33 of electrons participating in transport is related to the Fermi energy xmath137 by the following equation, valid at finite xmath30 for magnetic fields xmath2 larger than a certain value: xmath138, in which xmath139 is the Fermi distribution function at temperature xmath76 and the summation index xmath120 runs over xmath140 for xmath133, or xmath141 for xmath136. In the case of xmath131, xmath142 (valid for arbitrary magnetic field), in which xmath143.
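Schematically, and with my own symbols standing in for the placeholders (a hedged sketch of this carrier counting, not the paper's exact expression): with each broadened level holding \(eB/h\) states per unit area and \(f\) the Fermi function,

\[
N \;\simeq\; \frac{eB}{h}\sum_{n\geq 1} f(\varepsilon_{+n})
\qquad (g>0,\; B \text{ above the gap-opening field}),
\]

with the zero level excluded, whereas for \(g=0\) the available upper half of the zero level adds a term of order \(\tfrac{1}{2}(eB/h)\,f(\varepsilon_{0})\) to the sum, valid at any field.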
The imaginary part of the relative-electron density correlation function in the presence of a magnetic field, xmath86, can be expressed in the Landau representation as xcite xmath144, in which the transform factor is xmath145, with xmath146, xmath147, xmath148 and xmath149 being associated Laguerre polynomials. The Landau-representation correlation function xmath150 in Eq. [piqw] can be constructed from the imaginary part of the retarded Green's function xmath151, or the density of states, of the xmath120-th Landau level as xcite xmath152, whose integrand involves the product \(\mathrm{Im}\,G_{n}(\varepsilon+\omega)\,\mathrm{Im}\,G_{n'}(\varepsilon)\) (reconstructed from the garbled span). The summation indices xmath120 and xmath153 in Eq. [piqw] are taken over xmath140 for xmath133, or xmath154 for xmath136. In the case of xmath131, Eq. [piqw] still works and the summation indices xmath120 and xmath153 run over xmath154, but with xmath155 replaced by xmath156 in Eq. [p2nn].

Numerical calculations are performed for the magnetoresistivity xmath157 of the surface state in a uniform TI Bixmath0Sexmath1. At zero temperature the elastic scattering contributing to the resistivity is modeled by a Coulomb potential due to charged impurities xcite, xmath158, with xmath159 being the impurity density, which is determined by the zero-magnetic-field mobility xmath5. At temperatures higher than xmath160 xcite, phonon scattering plays an increasingly important role, and the dominant inelastic contribution comes from optical phonons. For this polar material, scattering by optical phonons via the deformation potential can be neglected; hence we take into account the inelastic scattering from optical phonons via the Fröhlich coupling xmath161. In the numerical calculation we use the following parameters xcite: Fermi velocity xmath162, static dielectric constant xmath163, optical dielectric constant xmath164, and phonon energy xmath165. The broadening parameter is taken to be xmath166.

[Figure (diffg): magnetoresistivity as a function of the magnetic field xmath2 for different effective g-factors xmath167 and xmath168, for a TI surface system with electron sheet density xmath169, in the cases of zero-magnetic-field mobility xmath170 (a) and xmath171 (b); several integer positions of the filling factor xmath172 are marked in (b).]

Fig. [diffg] shows the calculated magnetoresistivity xmath157 versus the magnetic field strength xmath2 for a TI surface system with electron sheet density xmath169 but different effective g-factors, xmath167 and xmath168, for two values of the zero-magnetic-field mobility, xmath170 and xmath171, representing different degrees of Landau-level broadening. In the case without Zeeman splitting (xmath131), the resistivity xmath157 exhibits almost no change with changing magnetic field up to 10 T, except for the Shubnikov-de Haas (SdH) oscillation showing up in the case of xmath171. This kind of magnetoresistance behavior was indeed seen experimentally in the electron-hole-symmetric, massless system of single-layer graphene xcite. In the case of a positive g-factor, xmath173, the magnetoresistivity increases linearly with increasing magnetic field, while for a negative g-factor, xmath174, the magnetoresistivity decreases linearly with increasing magnetic field.

[Figure (rhob): magnetoresistivity shown as a function of the magnetic field xmath2 for different values of the zero-magnetic-field mobility: (a) xmath175, (b) xmath176, (c) xmath177, (d) xmath178, (e) xmath179, and (f) xmath180. The inset of (a) illustrates the same for a larger magnetic-field range xmath181. The filling factor xmath182 is plotted versus the magnetic field in (f), and several integer positions of xmath182 are also marked in (d) and (e). Here the surface electron density is xmath169 and the lattice temperature xmath183.]

In the following we give a more detailed examination of the linearly increasing magnetoresistance in the positive-xmath30 case. Fig. [rhob] shows the calculated resistivity xmath157 versus the magnetic field strength xmath2 at lattice temperature xmath183 for a system of carrier sheet density xmath169 and xmath173, with different zero-field mobilities xmath184 and xmath180. All resistivity curves for mobility xmath185 exhibit clear linearity in the magnetic-field range and
show no tendency towards saturation at the highest field shown in the figure; in particular, for the case xmath170 the linear behavior extends even up to a magnetic field of xmath186, as illustrated in the inset of Fig. [rhob](a). This feature contradicts the classical MR, which saturates at sufficiently large magnetic field, xmath187.

Note that here we only present the calculated xmath157 for magnetic fields xmath2 larger than xmath188 T, for which a sufficient energy gap xmath135 is assumed to have opened, so that with further increase of the magnetic field the states in the + branch levels no longer shrink into the zero level, and the latter should be excluded from the conduction band. This is of course not true for very weak magnetic fields. When xmath189, the energy gap xmath190, and the situation becomes similar to the case of xmath131: the whole upper half of the zero-level states is available for electron occupation, and we should have a flat resistivity xmath157 when changing the magnetic field. With increasing xmath2, the portion of the zero-level states available to conduction electrons decreases until the magnetic field reaches xmath191. As a result, the resistivity xmath157 should exhibit a crossover from a flat dependence at small xmath2 to a positively linear increase at xmath192. This is just the behavior observed in the TI Bixmath0Sexmath1 xcite.

Note that in the case of xmath170 the broadened Landau-level widths are always larger than the neighboring level interval, xmath193, which requires xmath194/2 even for the lowest Landau level xmath195; i.e. the whole Landau-level spectrum is smeared. With increasing zero-field mobility, the magnitude of the resistivity xmath157 decreases, and when the broadened Landau-level width becomes smaller than the neighboring level interval, xmath196, a weak SdH oscillation begins to appear around the linearly dependent average value of xmath157 in the higher portion of the magnetic-field range, as seen in Figs. [rhob](c), (d) and (e) for xmath197 and xmath198. On the other hand, in the case of large mobility, e.g. xmath199, where the broadened Landau-level widths xmath200 are much smaller than the neighboring level interval even for a level index xmath120 as large as xmath201, the magnetoresistivity shows a pronounced SdH oscillation and the linear-dependence behavior disappears before the appearance of the quantum Hall effect xcite, as shown in Fig. [rhob](f).

Abrikosov's model for the LMR requires the applied magnetic field to be large enough to reach the quantum limit, at which all the carriers are within the lowest Landau level xcite, while it is obvious that more than one Landau level is occupied in the experimental samples in the field range in which the linear and non-saturating magnetoresistivity was observed xcite. For the given electron surface density xmath202, the number of occupied Landau levels, or the filling factor xmath172, at different magnetic fields is shown in Fig. [rhob](f), as well as in Figs. [rhob](d) and (e), where the integer positions of xmath203, i.e. filling up of entire xmath182 Landau levels, coincide with the minima of the density of states, or the dips of the SdH oscillation. This is in contrast with the xmath131 case, where an integer value of xmath203, which implies filling up to the center position of the xmath182-th Landau level, is located at a peak of the SdH oscillation, as shown in Fig. [diffg](b). The observed SdH oscillations in the Bixmath0Sexmath1 nanoribbon exhibiting non-saturating surface LMR in the experiment xcite favor the former case: a finite positive effective xmath133.
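The filling-factor bookkeeping used in this comparison is simply

\[
\nu \;=\; \frac{N}{eB/h} \;=\; \frac{N h}{e B},
\]

so at fixed sheet density \(N\) the integer values of \(\nu\) (complete filling of \(\nu\) levels) move to lower fields as \(B\) grows; as the text notes, for a positive g-factor these integers sit at the SdH dips, while for \(g=0\) an integer \(\nu\) sits at a level center and hence at an SdH peak.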
[Figure (rhon): magnetoresistivity plotted as a function of the surface electron density xmath33 at magnetic field xmath204: (a) at different values of the zero-field mobility xmath5, and (b) at different values of the zero-field conductivity xmath205.]

[Figure (rhot): magnetoresistivity at various lattice temperatures; here the zero-magnetic-field mobility at zero temperature is xmath206.]

Next we examine the density dependence of the linear magnetoresistivity, to compare with Abrikosov's quantum magnetoresistance, which suggests a xmath207 behavior xcite. We show the calculated xmath208 for the above LMR versus the carrier sheet density xmath33 in Fig. [rhon], at fixed magnetic field xmath209 T. The mobility is taken to be xmath210 and xmath211 m²/Vs, respectively, to keep the resistivity in the LMR regime. A clearly linear dependence of xmath213 on the surface density xmath33 is seen in all cases, indicating that this non-saturating linear resistivity is almost inversely proportional to the carrier density. In the figure we also show xmath208 versus xmath33 under the condition of different given conductivities, xmath214 and xmath215. In this case the half-width xmath216 is independent of the surface density. The linear dependence still holds, indicating that this linear behavior is not sensitive to the modest xmath33-dependence of the Landau-level broadening xmath216, as long as the system stays in the overlapped-Landau-level regime.

From the above discussion it is obvious that the LMR shows up in a system having overlapped Landau levels, and that the separation of the Landau levels makes the MR depart from the linear increase. At high temperature, the thermal energy smears the level separation, and phonon scattering further broadens the Landau levels. Hence it is to be expected that this LMR is robust against raising the temperature. This is indeed the case, as seen in Fig. [rhot], where we plot the calculated magnetoresistivity xmath157 for the above system, with zero-temperature linear mobility xmath217 m²/Vs, versus the magnetic field at different lattice temperatures. We can see that raising the temperature up to room temperature has little effect on the linearity of the MR. Due to the decreased mobility at higher temperature from phonon scattering, the weak SdH oscillation on the linear background tends to vanish. These features are in good agreement with the experimental report xcite.
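For contrast, Abrikosov's quantum-limit result, quoted here up to constants as it is commonly written (and presumably what the xmath207 placeholder denotes), scales differently with carrier density \(n\) than the regime found here:

\[
\rho_{xx}^{\text{(Abrikosov)}} \;\propto\; \frac{N_{\mathrm i}\,B}{n^{2}}
\qquad\text{versus}\qquad
\rho_{xx}^{\text{(this regime)}} \;\propto\; \frac{B}{n},
\]

with \(N_{\mathrm i}\) the impurity density — so the density dependence itself discriminates between the two mechanisms.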
In summary, we have studied the two-dimensional magnetotransport in the flat surface of a three-dimensional TI, which arises from the surface states with a wavevector-linear energy dispersion and a finite, positive Zeeman splitting within the bulk energy gap. When the level broadening is comparable to or larger than the Landau-level separation, and the conduction electrons spread over many Landau levels, a positive, dominantly linear and non-saturating magnetoresistance appears within a quite wide range of magnetic field and persists up to room temperature. This remarkable LMR provides a possible mechanism for the recently observed linear magnetoresistance in topological insulator Bixmath0Sexmath1 nanoribbons xcite.

In contrast to the quantum Hall effect, which appears in the case of well-formed Landau levels, and to Abrikosov's quantum magnetotransport xcite, which is limited to the extreme quantum limit in which all electrons coalesce into the lowest Landau level, the discussed LMR is a phenomenon of purely classical two-dimensional magnetotransport in a system having linear energy dispersion, appearing in the regime of overlapped Landau levels, irrespective of its showing up in a relatively high magnetic-field range. Furthermore, the present scheme deals with the spatially uniform case, without invoking the mobility fluctuations in a strongly inhomogeneous system that are required in the classical Parish-Littlewood model to produce an LMR xcite.

The appearance of this significant, positively increasing linear magnetoresistance depends on the existence of a positive and sizable effective g-factor. If the Zeeman energy splitting is quite small, the resistivity xmath157 would exhibit little change with changing magnetic field. In the case of a negative and sizable effective g-factor, the magnetoresistivity would decrease linearly with increasing magnetic field. Therefore the behavior of the longitudinal resistivity versus magnetic field may provide a useful way of judging the direction and the size of the effective Zeeman energy splitting in TI surface states.

This work was supported by the National Science Foundation of China (Grant No. 11104002), the National Basic Research Program of China (Grant No. 2012CB927403), and by the Program for Science & Technology Innovation Talents in Universities of Henan Province (Grant No. 2012HASTIT029).

        inputs = tokenizer(
            [ARTICLE_LEP, ARTICLE_MAGNET],
            max_length=1024,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        inputs = {k: inputs[k].to(torch_device) for k in inputs}

        hypotheses_batch = model.generate(**inputs)

        EXPECTED_LEP = (
            "we study the rare decays xmath0 ( xmath1 ) at the gigaz option of the international linear collider "
            "( ilc ).<n> we calculate the branching ratios of xmath2 in the two higgs doublet model ( 2hdm ), "
            "the minimal supersymmetric standard model ( mssm ), the next-to-minimal supersymmetric standard model "
            "( nmssm ) and the nearly minimal supersymmetric standard model ( nmssm ).<n> we find that the branching "
            "ratios of xmath3 can reach xmath4 in 2hdm, xmath5 in mssm, xmath6 in nmssm and xmath7 in nmssm, while "
            "they are much smaller than xmath8 in 2hdm, xmath9 in mssm, xmath10 in nmssm and xmath11 in nmssm."
        )

        EXPECTED_MAGNET = (
            "we investigate the two-dimensional magnetotransport in the surface state of a topological insulator "
            "( ti ).<n> we find that a positive, nonsaturating and dominantly linear magnetoresistance can appear "
            "within quite wide magnetic field range in the ti surface state having a positive and finite effective "
            "g factor.<n> this linear magnetoresistance shows up in the system of high carrier concentration and "
            "low mobility when electrons are in extended states and spread over many smeared landau levels, and "
            "persists up to room temperature, providing a possible mechanism for the recently observed linear "
            "magnetoresistance in topological insulator bixmath0sexmath1 nanoribbons."
        )

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        self.assertTrue(generated == [EXPECTED_LEP, EXPECTED_MAGNET])
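The test above exercises exactly the pattern a user would follow outside the harness. A minimal standalone sketch, assuming the public "google/bigbird-pegasus-large-arxiv" checkpoint; `long_article` is a hypothetical stand-in for any long input string:

from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration

# Hypothetical standalone usage of the arXiv-summarization checkpoint.
tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")

# Block-sparse attention is what makes a 4096-token context affordable here,
# versus the 1024 tokens used in the integration test above.
inputs = tokenizer(long_article, max_length=4096, truncation=True, return_tensors="pt")
summary_ids = model.generate(**inputs, num_beams=4, max_length=256)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])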
# Testing suite for the PyTorch BigBirdPegasus model (standalone decoder).
class BigBirdPegasusStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=7,
        d_model=32,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        is_encoder_decoder=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
        attention_type="original_full",
        use_bias=True,
        block_size=16,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # for common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.is_encoder_decoder = is_encoder_decoder
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = BigBirdPegasusConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
            is_encoder_decoder=self.is_encoder_decoder,
            attention_type=self.attention_type,
            use_bias=self.use_bias,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
        )

        return config, input_ids, attention_mask, lm_labels

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def create_and_check_decoder_model_attention_mask_past(self, config, input_ids, attention_mask, lm_labels):
        model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = input_ids.shape[-1] // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # big bird has extremely high logits which requires such a high error tolerance here
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=5e-1)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class BigBirdPegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BigBirdPegasusDecoder, BigBirdPegasusForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (BigBirdPegasusForCausalLM,) if is_torch_available() else ()
    test_pruning = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = BigBirdPegasusStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding and it's not used enough to be worth fixing.")
    def test_left_padding_compatibility(self):
        pass
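A compact illustration of the property these decoder tests verify: with caching, feeding only the newest token plus past_key_values must reproduce the full-sequence forward pass. A sketch with a deliberately tiny, hypothetical config (sizes mirror the tester above; not a recommended real configuration):

import torch
from transformers import BigBirdPegasusConfig, BigBirdPegasusForCausalLM

# Tiny, hypothetical config chosen for speed, not realism.
config = BigBirdPegasusConfig(
    vocab_size=99,
    d_model=32,
    decoder_layers=2,
    decoder_attention_heads=4,
    decoder_ffn_dim=32,
    max_position_embeddings=64,
    is_encoder_decoder=False,
)
model = BigBirdPegasusForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
with torch.no_grad():
    # Cache the prefix, then feed only the newest token with past_key_values.
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step_logits = model(input_ids[:, -1:], past_key_values=past).logits
    full_logits = model(input_ids).logits[:, -1:]
print(torch.allclose(step_logits, full_logits, atol=1e-3))  # should print True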
ARTICLE_LEP: The LEP experiments at the resonance of the xmath1 boson have tested the Standard Model (SM) at the quantum level, measuring the xmath1 decay into fermion pairs with an accuracy of one part in ten thousand. The good agreement of the LEP data with the SM predictions has severely constrained the behavior of new physics at the xmath1 pole. Taking these achievements into account, one can imagine that the physics of the xmath1 boson will again play a central role at the frontier of particle physics if the next-generation xmath1 factory comes true, with the number of generated xmath1 events several orders of magnitude higher than that of the LEP. Such a factory can be realized in the GigaZ option of the International Linear Collider (ILC) xcite. The ILC is a proposed electron-positron collider with tunable energy ranging from xmath12 to xmath13 and polarized beams in its first phase, and the GigaZ option corresponds to its operation on top of the resonance of the xmath1 boson by adding a bypass to its main beam line. Given the high luminosity, xmath14, and the cross section at the resonance of the xmath1 boson, xmath15, about xmath16 xmath1 events can be generated in an operational year of xmath17 of GigaZ, which implies that the expected sensitivity to the branching ratio of xmath1 decay can be improved from xmath18 at the LEP to xmath19 at the GigaZ xcite. In light of this, the xmath1-boson properties, especially its exotic or rare decays, which are widely believed to be sensitive to new physics, should be investigated comprehensively to evaluate their potential in probing new physics.

Among the rare xmath1 decays, the flavor-changing (FC) processes were most extensively studied to explore the flavor texture in new physics xcite, and it was found that, although these processes are severely suppressed in the SM, their branching ratios in new physics models can be greatly enhanced, to xmath19 for lepton-flavor-violating decays xcite and to xmath20 for quark-flavor-violating decays xcite. Besides the FC processes, the xmath1 decay into light Higgs boson(s) is another type of rare process that was widely studied; e.g. the decay xmath21 (xmath22), with the particle xmath0 denoting a light Higgs boson, was studied in xcite; the decay xmath23 was studied in the two-Higgs-doublet model (2HDM) xcite and the minimal supersymmetric standard model (MSSM) xcite; and the decay xmath4 was studied in a model-independent way xcite, in the 2HDM xcite and also in the MSSM xcite. These studies indicate that, in contrast with the kinematic forbiddenness of these decays in the SM, their rates can be as large as xmath18 in new physics models, which lies within the expected sensitivity of the GigaZ. In this work we extend the previous studies of these decays to some new models and investigate these decays together. We are motivated by some recent studies on the singlet extension of the MSSM, such as the next-to-minimal supersymmetric standard model (NMSSM) xcite and the nearly minimal supersymmetric standard model (nMSSM) xcite, where a light CP-odd Higgs boson xmath0 with a singlet-dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry, such as a xmath24 or Peccei-Quinn symmetry xcite. These non-minimal supersymmetric models can not only avoid the xmath25 problem but also alleviate the little hierarchy problem by having such a light Higgs boson xmath0 xcite. We are also motivated by the fact that, with the latest experiments, the properties of the light Higgs boson are more stringently constrained than before, so it is worth updating the previous studies.

So far there is no model-independent lower bound on the mass of the lightest Higgs boson. In the SM it must be heavier than xmath26 GeV, as obtained from the null observation of the Higgs boson at the LEP experiments. However, due to the more complex structure of the Higgs sector
in the extensions of the SM, this lower bound can be significantly relaxed. According to recent studies, for the CP-odd Higgs boson xmath0 we have, e.g., xmath27 GeV in the NMSSM xcite, xmath28 GeV in the nMSSM xcite, and xmath29 GeV in the lepton-specific 2HDM (L2HDM) xcite. With such a light CP-odd Higgs boson, the Z decay into one or more xmath0 opens up. Noting that the decay xmath30 is forbidden by Bose symmetry, we in this work study the rare xmath1 decays xmath6 (xmath22), xmath31 and xmath4 in a comparative way for four models, namely the type-II 2HDM xcite, the L2HDM xcite, the NMSSM and the nMSSM. In our study we examine carefully the constraints on the light xmath0 from many of the latest experimental results.

This work is organized as follows. In Sec. II we briefly describe the four new physics models. In Sec. III we present the calculations of the rare xmath1 decays. In Sec. IV we list the constraints on the four new physics models. In Sec. V we show the numerical results for the branching ratios of the rare xmath1 decays in the various models. Finally, the conclusion is given in Sec. VI.

As the most economical option, the SM utilizes one Higgs doublet to break the electroweak symmetry. As a result, the SM predicts only one physical Higgs boson, with its properties totally determined by two free parameters. In new physics models, the Higgs sector is usually extended by adding Higgs doublets and/or singlets, and consequently more physical Higgs bosons are predicted, along with more free parameters involved.

The general 2HDM contains two xmath32 doublet Higgs fields, xmath33 and xmath34, and under the assumption of CP conservation its scalar potential can be parameterized as xcite xmath35, where xmath36 (xmath37) are free dimensionless parameters and xmath38 (xmath39) are parameters with mass dimension. After the electroweak symmetry breaking, the spectrum of this Higgs sector includes three massless Goldstone modes, which become the longitudinal modes of the xmath40 and xmath1 bosons, and five massive physical states: two CP-even Higgs bosons, xmath41 and xmath42, one neutral CP-odd Higgs particle, xmath0, and a pair of charged Higgs bosons, xmath43. Noting the constraint xmath44, with xmath45 and xmath46 denoting the vacuum expectation values (VEVs) of xmath33 and xmath34 respectively, we choose xmath47 as the input parameters, with xmath48 and with xmath49 being the mixing angle that diagonalizes the mass matrix of the CP-even Higgs fields.

The difference between the type-II 2HDM and the L2HDM comes from the Yukawa couplings of the Higgs bosons to quarks and leptons. In the type-II 2HDM, one Higgs doublet, xmath34, generates the masses of the up-type quarks, and the other doublet, xmath33, generates the masses of the down-type quarks and charged leptons; in the L2HDM, one Higgs doublet, xmath33, couples only to leptons and the other doublet, xmath34, couples only to quarks. So the Yukawa interactions of xmath0 with the fermions in these two models are given by xcite xmath50, with xmath51 denoting the generation index. Obviously, in the type-II 2HDM the xmath52 coupling and the xmath53 coupling can be simultaneously enhanced by xmath54, while in the L2HDM only the xmath53 coupling is enhanced by xmath55.
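The elided potential xmath35 presumably follows the standard CP-conserving 2HDM parameterization; as a reconstruction from the surrounding description (four dimensionless quartic couplings plus mass-dimension parameters), not a verbatim restoration:

\[
\begin{aligned}
V &= m_{11}^{2}\,\Phi_1^{\dagger}\Phi_1 + m_{22}^{2}\,\Phi_2^{\dagger}\Phi_2
  - \left[m_{12}^{2}\,\Phi_1^{\dagger}\Phi_2 + \mathrm{h.c.}\right]
  + \tfrac{\lambda_1}{2}\big(\Phi_1^{\dagger}\Phi_1\big)^{2}
  + \tfrac{\lambda_2}{2}\big(\Phi_2^{\dagger}\Phi_2\big)^{2} \\
&\quad + \lambda_3\big(\Phi_1^{\dagger}\Phi_1\big)\big(\Phi_2^{\dagger}\Phi_2\big)
  + \lambda_4\big(\Phi_1^{\dagger}\Phi_2\big)\big(\Phi_2^{\dagger}\Phi_1\big)
  + \tfrac{\lambda_5}{2}\left[\big(\Phi_1^{\dagger}\Phi_2\big)^{2} + \mathrm{h.c.}\right],
\end{aligned}
\]

with \(\tan\beta = v_2/v_1\) and \(\alpha\) the angle diagonalizing the CP-even mass matrix, matching the input-parameter choice described above.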
The structures of the NMSSM and the nMSSM are described by their superpotentials and the corresponding soft-breaking terms, which are given by xcite xmath56, where xmath57 is the superpotential of the MSSM without the xmath25 term; xmath58 and xmath59 are Higgs doublet and singlet superfields, with xmath60 and xmath61 being their scalar components, respectively; xmath62, xmath63, xmath64, xmath65, xmath66 and xmath67 are soft-breaking parameters; and xmath68 and xmath69 are the coefficients of the Higgs self-interactions.

With the superpotentials and the soft-breaking terms, one can obtain the Higgs potentials of the NMSSM and the nMSSM, respectively. As in the 2HDM, the Higgs bosons with the same CP property mix, and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices: xmath70, where the fields on the right-hand sides of the equations are the component fields of xmath71, xmath72 and xmath61, defined by xmath73. xmath74 and xmath75 are, respectively, the CP-even and CP-odd neutral Higgs bosons; xmath76 and xmath77 are the Goldstone bosons eaten by the xmath1 and xmath78; and xmath79 is the charged Higgs boson. So both the NMSSM and the nMSSM predict three CP-even Higgs bosons, two CP-odd Higgs bosons and one pair of charged Higgs bosons. In general, the lighter CP-odd Higgs xmath0 in these models is a mixture of the singlet field xmath80 and the doublet-field combination xmath81, i.e. xmath82, and its couplings to down-type quarks are then proportional to xmath83. So, for a singlet-dominated xmath0, xmath84 is small and the couplings are suppressed. As a comparison, the interactions of xmath0 with the squarks are given by xcite xmath85, i.e. this interaction does not vanish when xmath86 approaches zero.

Just as in the 2HDM, where we use the VEVs of the Higgs fields as fundamental parameters, we choose xmath68, xmath69, xmath87, xmath88, xmath66 and xmath89 as input parameters for the NMSSM xcite, and xmath68, xmath54, xmath88, xmath65, xmath90 and xmath91 as input parameters for the nMSSM xcite.

About the NMSSM and the nMSSM, three points should be noted. The first is that in the two models there is no explicit xmath92 term, and the effective xmath25 parameter, xmath93, is generated when the scalar component of xmath59 develops a VEV. The second is that the nMSSM is actually the same as the NMSSM with xmath94 xcite, because the tadpole terms xmath95 and the soft-breaking term xmath96 in the nMSSM do not induce any interactions except for the tree-level Higgs boson masses and the minimization conditions. The last is that, despite the similarities, the nMSSM has its own peculiarity, which comes from its neutralino sector. In the basis xmath97, its neutralino mass matrix is given by xcite xmath98, where xmath99 and xmath100 are the xmath101 and xmath102 gaugino masses, respectively, and xmath103, xmath104, xmath105 and xmath106. After diagonalizing this matrix, one can obtain the mass eigenstate of the lightest neutralino xmath107, with mass of the following form xcite: xmath108. This expression implies that xmath107 must be lighter than about xmath109 GeV for xmath110 (from the lower bound on the chargino mass) and xmath111 (the perturbativity bound). Like in other supersymmetric models, xmath107, as the lightest sparticle, acts as the dark matter in the universe; but due to its singlino-dominated nature, it is difficult for it to annihilate sufficiently to yield the correct relic density in the current universe. So the relic density of xmath107 plays a crucial role in selecting the model parameters. For example, as shown in xcite, for xmath112 there is no way to get the correct relic density, and for the other cases xmath107 mainly annihilates by exchanging a xmath1 boson for xmath113, or by exchanging a light CP-odd Higgs boson xmath0 with mass satisfying the relation xmath114 for xmath115. For the annihilation, xmath54 and xmath25 are required to be less than 10 and xmath116, respectively, because through Eq. [mass-exp] a large xmath87 or xmath25 will suppress xmath117 and make the annihilation more difficult.
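The superpotential structure described above is conventionally written as follows; again a standard-form reconstruction of the elided xmath56, not the paper's exact expression:

\[
W_{\mathrm{NMSSM}} = W_{\mathrm{MSSM}}\big|_{\mu=0} + \lambda\,\hat{S}\,\hat{H}_u\!\cdot\!\hat{H}_d + \frac{\kappa}{3}\,\hat{S}^{3},
\qquad
W_{\mathrm{nMSSM}} = W_{\mathrm{MSSM}}\big|_{\mu=0} + \lambda\,\hat{S}\,\hat{H}_u\!\cdot\!\hat{H}_d + \xi_F M_n^{2}\,\hat{S},
\]

so the nMSSM is the \(\kappa \to 0\) limit of the NMSSM supplemented by a tadpole term, and the effective \(\mu\) parameter is generated as \(\mu_{\mathrm{eff}} = \lambda\langle S\rangle\) once the singlet acquires a VEV — exactly the first two of the "three points" above.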
The properties of the lightest CP-odd Higgs boson xmath0, such as its mass and couplings, are also tightly limited, since xmath0 plays an important role in the annihilation of xmath107. The phenomenology of the nMSSM is also rather special, and this was discussed in detail in xcite.

In the type-II 2HDM, the L2HDM, the NMSSM and the nMSSM, the rare xmath1 decays xmath118 (xmath22), xmath3 and xmath4 may proceed through the Feynman diagrams shown in Fig. [fig1], Fig. [fig2] and Fig. [fig3], respectively. In these diagrams, the intermediate state xmath119 represents all possible CP-even Higgs bosons in the corresponding model, i.e. xmath41 and xmath42 in the type-II 2HDM and the L2HDM, and xmath41, xmath42 and xmath120 in the NMSSM and the nMSSM. In order to take into account the possible resonance effects of xmath119 in Fig. [fig1](c) for xmath2 and in Fig. [fig3](a) for xmath11, we have calculated all the decay modes of xmath119 and properly included the width effect in its propagator. As for the decay xmath121, two points should be noted. One is that, unlike the decays xmath6 and xmath11, this process proceeds only through loops mediated by quarks and leptons in the type-II 2HDM and the L2HDM, and additionally by sparticles in the NMSSM and the nMSSM; so in most cases its rate should be much smaller than those of the other two. The other is that, due to CP invariance, loops mediated by squarks and sleptons give no contribution to the decay xcite. In the actual calculation this is reflected by the fact that the coupling coefficient of xmath122 differs from that of xmath123 by a minus sign (see Eq. [asqsq]), and as a result the squark-mediated contributions to xmath121 are completely canceled out.

With regard to the rare decay xmath11, more explanation is in order. At the lowest order, this decay proceeds through the diagram shown in Fig. [fig3](a), and hence one may think that, as a rough estimate, it is enough to consider only the contributions from Fig. [fig3](a). However, we note that in some cases of the type-II 2HDM and the L2HDM, due to the cancellation of the contributions from the different xmath119 in Fig. [fig3](a), and also due to the potential largeness of the xmath124 couplings (i.e. larger than the electroweak scale xmath125), the radiative corrections from the Higgs-mediated loops may dominate over the tree-level contribution even when the tree-level prediction of the rate xmath126 exceeds xmath20. On the other hand, we find that the contribution from quark/lepton-mediated loops can be safely neglected if xmath127 in the type-II 2HDM and the L2HDM. In the NMSSM and the nMSSM, besides the corrections from the Higgs- and quark/lepton-mediated loops, loops involving sparticles such as squarks, charginos and neutralinos can also contribute to the decay. We numerically checked that the contributions from squarks and charginos can be safely neglected if xmath127. We also calculated part of the potentially large neutralino corrections (note that there are in total about xmath128 diagrams for such corrections) and found that they can be neglected too. Since considering all the radiative corrections would make our numerical calculation rather slow, we include only the most important correction, namely that from the Higgs-mediated loops, in presenting our results for the four models.

One can intuitively understand the relative smallness of the sparticle contributions to xmath11 as follows. First consider the squark contributions, which are induced by the xmath129 interaction (xmath130 denotes the squark in the chirality basis) and the xmath131 interaction through box diagrams. Because the xmath132 interaction conserves the chirality of the squarks while the xmath133 interaction violates it, to get a non-zero
contribution to xmath11 from the squark loops at least four chiral flippings are needed with three of them provided by xmath131 interaction and the rest provided by the left right squark mixing this means that if one calculates the amplitude in the chirality basis with the mass insertion method the amplitude is suppressed by the mixing factor xmath134 with xmath135 being the off diagonal element in squark mass matrix next consider the chargino neutralino contributions since for a light xmath0 its doublet component parameterized by xmath84 in eq mixing is usually small the couplings of xmath0 with the sparticles will never be tremendously large xcite so the chargino neutralino contributions are not important too in our calculation of the decays we work in the mass eigenstates of sparticles instead of in the chirality basis for the type ii 2hdm and the l2hdm we consider the following constraints xcite theoretical constraints on xmath136 from perturbativity unitarity and requirements that the scalar potential is finit at large field values and contains no flat directions xcite which imply that xmath137 the constraints from the lep search for neutral higgs bosons we compute the signals from the higgs strahlung production xmath138 xmath139 with xmath140 xcite and from the associated production xmath141 with xmath142 xcite and compare them with the corresponding lep data which have been inputted into our code we also consider the constraints from xmath138 by looking for a peak of xmath143 recoil mass distribution of xmath1 boson xcite and the constraint of xmath144 mev when xmath145 xcite these constraints limit the quantities such as xmath146 times br h_i to bar b b on the xmath147 plane with the the subscript xmath148 denoting the coupling coefficient of the xmath149 interaction they also impose a model dependent lower bound on xmath150 e g xmath151 for the type ii 2hdm from our scan results xmath152 for the l2hdm xcite and xmath153 for the nmssm xcite these bounds are significantly lower than that of the sm i e xmath154 partially because in new physics models unconventional decay modes of xmath155 such as xmath156 are open up as to the nmssm another specific reason for allowing a significantly lighter cp even higgs boson is that the boson may be singlet dominated in this model with regard to the lightest cp odd higgs boson xmath0 we checked that there is no lower bound on its mass so long as the xmath157 interaction is weak or xmath155 is sufficiently heavy the constraints from the lep search for a light higgs boson via the yukawa process xmath158 with xmath22 and xmath61 denoting a scalar xcite these constraints can limit the xmath159 coupling versus xmath160 in new physics models the constraints from the cleo iii limit on xmath161 and the latest babar limits on xmath162 these constraints will put very tight constraints on the xmath163 coupling for xmath164 in our analysis we use the results of fig 8 in the second paper of xcite to excluded the unfavored points the constraints from xmath165 couplings since the higgs sector can give sizable higher order corrections to xmath165 couplings we calculate them to one loop level and require the corrected xmath165 couplings to lie within the xmath166 range of their fitted value the sm predictions for the couplings at xmath1 pole are given by xmath167 and xmath168 xcite and the fitted values are given by xmath169 and xmath170 respectively xcite we adopt the formula in xcite to the 2hdm in our calculation the constraints from xmath171 leptonic decay we 
require the new physics correction to the branching ratio xmath172 to be in the range of xmath173 xcite we use the formula in xcite in our calculation about the constraints 5 and 6 two points should be noted one is all higgs bosons are involved in the constraints by entering the self energy of xmath171 lepton the xmath174 vertex correction or the xmath175 vertex correction and also the box diagrams for xmath176 xcite since the yukawa couplings of the higgs bosons to xmath171 lepton get enhanced by xmath54 and so do the corrections xmath54 must be upper bounded for given spectrum of the higgs sector generally speaking the lighter xmath0 is the more tightly xmath54 is limited xcite the other point is in the type ii 2hdm xmath177 b physics observables as well as xmath178 decays discussed above can constraint the model in a tighter way than the constraints 5 and 6 since the yukawa couplings of xmath171 lepton and xmath179 quark are simultaneously enhanced by xmath54 but for the l2hdm because only the yukawa couplings of xmath171 lepton get enhanced see eq yukawa the constraints 5 and 6 are more important in limiting xmath54 indirect constraints from the precision electroweak observables such as xmath180 xmath181 and xmath182 or their combinations xmath183 xcite we require xmath184 to be compatible with the lep sld data at xmath185 confidence level xcite we also require new physics prediction of xmath186 is within the xmath187 range of its experimental value the latest results for xmath188 are xmath189 measured value and xmath190 sm prediction for xmath191 gev xcite in our code we adopt the formula for these observables presented in xcite to the type ii 2hdm and the l2hdm respectively in calculating xmath180 xmath181 and xmath182 we note that these observables get dominant contributions from the self energies of the gauge bosons xmath1 xmath192 and xmath193 since there is no xmath194 coupling or xmath195 coupling xmath0 must be associated with the other higgs bosons to contribute to the self energies so by the uv convergence of these quantities one can infer that for the case of a light xmath0 and xmath196 these quantities depend on the spectrum of the higgs sector in a way like xmath197 at leading order which implies that a light xmath0 can still survive the constraints from the precision electroweak observables given the splitting between xmath150 and xmath198 is moderate xcite the constraints from b physics observables such as the branching ratios for xmath199 xmath200 and xmath201 and the mass differences xmath202 and xmath203 we require their theoretical predications to agree with the corresponding experimental values at xmath187 level in the type ii 2hdm and the l2hdm only the charged higgs boson contributes to these observables by loops so one can expect that xmath198 versus xmath54 is to be limited combined analysis of the limits in the type ii 2hdm has been done by the ckmfitter group and the lower bound of xmath204 as a function of xmath87 was given in fig 11 of xcite this analysis indicates that xmath198 must be heavier than xmath205 at xmath185 c l regardless the value of xmath54 in this work we use the results of fig 11 in xcite to exclude the unfavored points as for the l2hdm b physics actually can not put any constraints xcite because in this model the couplings of the charged higgs boson to quarks are proportional to xmath206 and in the case of large xmath54 which we are interested in they are suppressed in our analysis of the l2hdm we impose the lep bound on xmath198 i e 
(9) The constraint from the muon anomalous magnetic moment xmath208. By now both the theoretical prediction and the experimentally measured value of xmath208 have reached remarkable precision, but a significant deviation still exists: xmath209 xcite. In the 2HDM, xmath208 gets additional contributions from the one-loop diagrams induced by the Higgs bosons and also from the two-loop Barr-Zee diagrams mediated by xmath0 and xmath155 xcite. If the Higgs bosons are much heavier than the xmath25 lepton mass, the contributions from the Barr-Zee diagrams are more important, and to efficiently alleviate the discrepancy in xmath208 one needs a light xmath0 with enhanced couplings to the xmath25 lepton, and also to heavy fermions such as the bottom quark and the xmath171 lepton, to push up the effect of the Barr-Zee diagram xcite. The CP-even Higgs bosons are usually preferred to be heavy, since their contributions to xmath208 are negative. In the type II 2HDM, because xmath54 is tightly constrained by the process xmath210 at LEP xcite and by the xmath178 decay xcite, the Barr-Zee contribution is insufficient to bring xmath208 within the xmath187 range of its measured value xcite; so in our analysis we only require the type II 2HDM to explain xmath208 at the xmath211 level. For the L2HDM, by contrast, xmath54 is less constrained and the Barr-Zee diagram involving the xmath171 loop is capable of greatly pushing up the theoretical prediction of xmath208 xcite; we therefore require the L2HDM to explain the discrepancy at the xmath187 level. Unlike the other constraints discussed above, the xmath208 constraint puts a two-sided bound on xmath54: on the one hand a large xmath54 is needed to enhance the Barr-Zee contribution, but on the other hand a too-large xmath54 results in an unacceptably large xmath208.
Since this paper concentrates on a light xmath0, the decay xmath212 opens up with a possibly large decay width. We require the width of any Higgs boson to be smaller than its mass, to avoid a too-fat Higgs boson xcite. We checked that for the scenario characterized by xmath213 the coefficient of the xmath214 interaction is usually larger than the electroweak scale xmath125, and consequently a large decay width results.
For the NMSSM and the nMSSM the above constraints become more complicated, because in these models not only are more Higgs bosons involved, but sparticles also enter the constraints, so some of the constraints are not easy to understand intuitively. Take the process xmath199 as an example. In the supersymmetric models, besides the charged-Higgs contribution, chargino loops, gluino loops and neutralino loops also contribute to the process xcite, and depending on the SUSY parameters any of these contributions may become dominant over, or be canceled by, the others. As a result, although the charged Higgs affects the process in the same way as in the type II 2HDM, a charged Higgs as light as xmath215 is still allowed, even for xmath216 xcite. Since among the constraints xmath208 is rather peculiar, in that it needs new physics to explain the discrepancy between xmath217 and xmath218, we discuss its dependence on the SUSY parameters a bit further. In the NMSSM and the nMSSM, xmath208 receives contributions from Higgs loops and from neutralino/chargino loops. The Higgs contribution is quite similar to that of the type II 2HDM, except that more Higgs bosons are involved xcite. The neutralino/chargino contribution, in the light-bino limit (i.e. xmath219), can be approximated by xcite xmath220 for xmath221, with xmath222 being the smuon mass. Combining the two contributions, one sees that a light xmath0 with large xmath54, and/or a light smuon with moderate xmath87, are favored to dilute the discrepancy.
Because more parameters are involved in the constraints on the supersymmetric models, we consider the following additional constraints to further limit their parameter space: (a) direct bounds on sparticle masses from the LEP1, LEP2 and Tevatron experiments xcite; (b) the LEP1 bound on the invisible Z decay, xmath223, and the LEP2 bounds on neutralino production, xmath224 and xmath225 xcite; (c) dark-matter constraints from the WMAP relic density, 0.0975 xmath226 0.1213 xcite. Note that among these, constraint (2) on the Higgs sector and constraint (c) on the neutralino sector are very important, because in the supersymmetric models the SM-like Higgs mass is bounded from above by about xmath227 at tree level and by about xmath228 at loop level, and the relic density restricts the LSP annihilation cross section to a rather narrow range.
In our analysis of the NMSSM we calculate constraints (3) and (5)-(7) ourselves and use the code NMSSMTools xcite to implement the rest. We also extended NMSSMTools to the nMSSM in order to implement the constraints there. The most difficult part of this extension was adapting the code micrOMEGAs xcite to the nMSSM case. We solved this problem by noting the following facts: as mentioned before, the nMSSM is actually the same as the NMSSM with the trilinear singlet term set to zero, so we can use the NMSSM model file as the input of micrOMEGAs and set xmath229; and since in the nMSSM the LSP is too light to annihilate into Higgs pairs, there is no need to reconstruct the effective Higgs potential in order to calculate the annihilation channel xmath230 precisely, with xmath61 denoting any of the Higgs bosons xcite. We thank the authors of NMSSMTools for helpful discussions on this issue while completing the extension xcite.
With the above constraints, we perform four independent random scans over the parameter spaces of the type II 2HDM, the L2HDM, the NMSSM and the nMSSM, respectively. We vary the parameters in the following ranges: xmath231 for the type II 2HDM, xmath232 for the L2HDM, xmath233 for the NMSSM, and xmath234 for the nMSSM. In performing the scans we note that for the NMSSM and the nMSSM some constraints also depend on the gaugino masses and on the soft-breaking parameters in the squark and slepton sectors. Since these parameters have little effect on the properties of xmath0, we fix them to reduce the number of free parameters in the scan. For the squark sector we adopt the xmath235 scenario, which assumes that the soft mass parameters of the third-generation squarks are degenerate, xmath236 800 GeV, and that the trilinear couplings of the third-generation squarks are also degenerate, xmath237 with xmath238. For the slepton sector we set all the soft-breaking masses and trilinear parameters to 100 GeV. This setting is necessary for the nMSSM, since for heavy sleptons it is difficult for this model to explain the muon anomalous moment at the xmath239 level xcite. Finally, we assume the grand-unification relation xmath240 for the gaugino masses, with xmath241 being the fine-structure constants of the different gauge groups. With a large number of random points in the scans, we finally obtain about xmath242, xmath243, xmath244 and xmath242 samples for the type II 2HDM, the L2HDM, the NMSSM and the nMSSM, respectively, which survive the constraints and satisfy xmath245.
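The scan-plus-filter procedure described above is conceptually simple. The sketch below illustrates it with a uniform random scan and a chain of constraint predicates; everything here (parameter names, ranges, constraint functions, point count) is a hypothetical illustration, not the paper's actual ranges or code, which relied on NMSSMTools/micrOMEGAs:

import random

# Hypothetical parameter ranges for a 2HDM-like scan (illustrative only).
RANGES = {"tan_beta": (1.0, 60.0), "m_a": (0.1, 30.0), "sin_theta": (0.0, 0.5)}

def sample_point():
    return {k: random.uniform(lo, hi) for k, (lo, hi) in RANGES.items()}

# Placeholder constraint predicates; real ones would call spectrum/observable codes.
def passes_lep_higgs(p):
    return p["sin_theta"] < 0.4

def passes_b_physics(p):
    return p["tan_beta"] < 55.0

def passes_muon_g2(p):
    return p["tan_beta"] * p["m_a"] ** -0.5 > 5.0

CONSTRAINTS = [passes_lep_higgs, passes_b_physics, passes_muon_g2]

surviving = []
for _ in range(100_000):
    point = sample_point()
    if all(check(point) for check in CONSTRAINTS):
        surviving.append(point)

print(f"{len(surviving)} samples survive all constraints")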
Analyzing the properties of the surviving xmath0 samples indicates that for most of the surviving points in the NMSSM and the nMSSM its dominant component is the singlet field (numerically speaking, xmath246), so that its couplings to the SM fermions are suppressed xcite. Our analysis also indicates that the main decay products of xmath0 are xmath247 for the L2HDM xcite, and xmath248 (dominant) plus xmath247 (subdominant) for the type II 2HDM, the NMSSM and the nMSSM, with neutralino pairs appearing in some rare cases in the nMSSM xcite.
In fig. (fig4) we project the surviving samples onto the xmath249 plane. This figure shows that the allowed range of xmath54 is from xmath250 to xmath251 in the type II 2HDM, and from xmath252 to xmath253 in the L2HDM. As introduced before, the lower bounds on xmath254 come from requiring the models to explain the muon anomalous moment, while the upper bounds come from the constraint we imposed from the LEP process xmath255, which limits the upper reach of the xmath256 coupling for light xmath61 xcite (for the dependence of the xmath256 coupling on xmath54, see sec.). The figure also indicates that for the NMSSM and the nMSSM xmath54 is bounded from above by xmath257: for the nMSSM this is because a large xmath87 can suppress the dark-matter mass and make its annihilation difficult (see xcite and also sec. ii), while for the NMSSM it is because we chose a light slepton mass, so that a large xmath54 would enhance xmath208 too significantly to be experimentally acceptable. We checked that with a slepton mass as heavy as xmath258, xmath259 is still allowed for the NMSSM.
In fig. (fig5) and fig. (fig6) we show the branching ratios of xmath260 and xmath261, respectively. Fig. (fig5) indicates that among the four models the type II 2HDM predicts the largest ratio for xmath260, with its value varying from xmath262 to xmath263. The underlying reason is that in the type II 2HDM the xmath264 coupling is enhanced by xmath54 (see fig. (fig4)), while in the other three models the coupling is suppressed either by xmath265 or by the singlet component of xmath0. Fig. (fig6) shows that the L2HDM predicts the largest rate for xmath266, reaching xmath5 in the optimal case, and that for the other three models the ratio of xmath261 is at least about one order of magnitude smaller than that of xmath267. This feature can be easily understood from the xmath268 coupling introduced in sect. We emphasize that if nature prefers a light xmath0, then xmath260 and/or xmath269 in the type II 2HDM and the L2HDM will be observable at the GigaZ; the rates of the two decays would then allow one to determine whether the type II 2HDM or the L2HDM is the right theory. If, on the other hand, both decays are observed with small rates, or fail to be observed, the singlet extensions of the MSSM are favored.
In fig. (fig7) we show the rate of xmath3 as a function of xmath270. This figure indicates that the branching ratio of xmath121 can reach xmath271, xmath272, xmath273 and xmath274 in the optimal cases of the type II 2HDM, the L2HDM, the NMSSM and the nMSSM, respectively, which implies that the decay xmath121 will never be observable at the GigaZ if one of the studied models is chosen by nature. The reason for this smallness, as pointed out before, is that the decay xmath121 proceeds only at loop level. Comparing the optimal cases of the type II 2HDM, the NMSSM and the nMSSM shown in figs. 5-7, one finds that the relation xmath275 holds for each of the decays. This is because the decays are all induced by Yukawa couplings of similar structure in these models; in the supersymmetric models the large singlet component of the light xmath0 acts to
suppress the yukawa couplings and the xmath0 in the nmssm has more singlet component than that in the nmssm next we consider the decay xmath11 which unlike the above decays depends on the higgs self interactions in fig fig8 we plot its rate as a function of xmath270 and this figure indicates that the xmath276 may be the largest among the ratios of the exotic xmath1 decays reaching xmath277 in the optimum cases of the type ii 2hdm the l2hdm and the nmssm the underlying reason is in some cases the intermediate state xmath119 in fig fig3 a may be on shell in fact we find this is one of the main differences between the nmssm and the nmssm that is in the nmssm xmath119 in fig fig3 a may be on shell corresponds to the points with large xmath278 while in the nmssm this seems impossible so we conclude that the decay xmath11 may serve as an alternative channel to test new physics models especially it may be used to distinguish the nmssm from the nmssm if the supersymmetry is found at the lhc and the xmath11 is observed at the gigaz with large rate before we end our discussion we note that in the nmssm the higgs boson xmath0 may be lighter than xmath279 without conflicting with low energy data from xmath178 decays and the other observables see fig fig4 fig8 in this case xmath0 is axion like as pointed out in xcite we checked that among the rare xmath1 decays discussed in this paper the largest branching ratio comes from xmath280 which can reach xmath281 since in this case the decay product of xmath0 is highly collinear muon pair detecting the decay xmath280 may need some knowledge about detectors which is beyond our discussion in this paper we studied the rare xmath1 decays xmath2 xmath7 xmath282 and xmath4 in the type ii 2hdm lepton specific 2hdm nmssm and nmssm which predict a light cp odd higgs boson xmath0 in the parameter space allowed by current experiments the branching ratio can be as large as xmath5 for xmath118 xmath8 for xmath3 and xmath9 for xmath4 which implies that the decays xmath2 and xmath283 may be accessible at the gigaz option since different models predict different size of branching ratios these decays can be used to distinguish different model through the measurement of these rare decays this work was supported in part by hastit under grant no 2009hastit004 by the national natural science foundation of china nnsfc under grant nos 10821504 10725526 10635030 10775039 11075045 and by the project of knowledge innovation program pkip of chinese academy of sciences under grant no for some reviews see e g m a perez g tavares velasco and j j toscano int j mod a 19 159 2004 j m yang arxiv 1006 2594 j i illana m masip 67 035004 2003 j cao z xiong j m yang 32 245 2004 d atwood _ et al_ 66 093005 2002 j kalinowski and s pokorski 219 116 1989 a djouadi p m zerwas and j zunft 259 175 1991 a djouadi j kalinowski and p m zerwas z phys c 54 255 1992 m krawczyk _ et al _ 19 463 2001 8 495 1999 j f gunion g gamberini and s f novaes 38 3481 1988 thomas j weiler and tzu chiang yuan 318 337 1989 a djouadi _ et al _ 1 163 1998 hep ph 9701342 d chang and w y keung phys lett 77 3732 1996 e keith and e ma 57 2017 1998 m a perez g tavares velasco and j j toscano int j mod phys a 19 159 2004 f larios g tavares velasco and c p yuan 64 055004 2001 66 075006 2002 a djouadi _ et al _ 10 27 1999 hep ph 9903229 for a detailed introduction of the nmssm see f franke and h fraas int j mod a 12 1997 479 for a recent review of the nmssm see for example u ellwanger c hugonie and a m teixeira arxiv 0910 1785 see e g 
j r ellis j f gunion h e haber l roszkowski and f zwirner phys rev d 39 1989 844 m drees int j mod phys a 4 1989 3635 u ellwanger m rausch de traubenberg and c a savoy phys b 315 1993 331 nucl b 492 1997 21 d j miller r nevzorov p m zerwas 681 3 2004 c panagiotakopoulos k tamvakis 446 224 1999 469 145 1999 c panagiotakopoulos a pilaftsis 63 055003 2001 a dedes _ et al _ 63 055009 2001 a menon _ et al _ 70 035005 2004 v barger _ et al _ 630 85 2005 c balazs _ et al _ 0706 066 2007 b a dobrescu k t matchev 0009 031 2000 a arhrib k cheung t j hou k w song hep ph 0611211 0703 073 2007 x g he j tandean and g valencia 98 081802 2007 0806 002 2008 f domingo _ et al_ 0901 061 2009 gudrun hiller 70 034018 2004 r dermisek and john f gunion 75 075019 2007 79 055014 2009 81 055001 2010 r dermisek john f gunion and b mcelrath 76 051105 2007 z heng _ et al_ 77 095012 2008 a belyaev _ et al_ 81 075021 2010 d das and u ellwanger arxiv 1007 1151 hep ph s andreas o lebedev s ramos sanchez and a ringwald arxiv 1005 3978 hep ph j f gunion jhep 0908 032 2009 r dermisek and j f gunion phys rev d 81 075003 2010 r dermisek and j f gunion phys lett 95 041801 2005 phys d 73 111701 2006 j cao h e logan j m yang 79 091701 2009 j cao p wan l wu j m yang 80 071701 2009 j f gunion and h e haber 67 075019 2003 r m barnett _ et al _ phys b 136 191 1984 r m barnett g senjanovic and d wyler phys d 30 1529 1984 y grossman nucl b 426 355 1994 h s goh l j hall and p kumar jhep 0905 097 2009 a g akeroyd and w j stirling nucl b 447 3 1995 a g akeroyd phys b 377 95 1996 h e logan and d maclennan phys rev d 79 115022 2009 m aoki _ et al _ arxiv 0902 4665 hep ph v barger p langacker h s lee and g shaughnessy phys d 73 115010 2006 s hesselbach _ et _ arxiv 0810 0511v2 hep ph de vivie and p janot aleph collaboration pa13 027 contribution to the international conference on high energy physics warsaw poland 2531 july 1996 j kurowska o grajek and p zalewski delphi collaboration cern open 99 385 aleph collaboration and delphi collaboration and l3 collaboration phys rept 427 257 2006 j cao and j m yang jhep 0812 006 2008 m krawczyk and d temes eur j c 44 435 2005 g altarelli and r barbieri 253 161 1991 m e peskin t takeuchi 46 381 1992 c amsler _ et al _ particle data group 667 1 2008 o deschamps s descotes genon s monteil v niess s tjampens and v tisserand arxiv 0907 5135 hep ph s su and b thomas phys d 79 095014 2009 g abbiendi _ et al _ eur phys j c 32 453 2004 m davier _ et al _ 66 1 2010 k cheung _ et al _ phys d 64 111301 2001 k cheung and o c w kong phys d 68 053003 2003 t besmer c greub t hurth 609 359 2001 f borzumati _ et al _ 62 075005 2000 j cao k i hikasa w wang j m yang and l x yu phys d 82 051701 2010 arxiv 1006 4811 hep ph j f gunion _ et d 73 015011 2006 martin and j d wells phys d 64 035003 2001 j abdallah _ et al _ eur j c 31 421 2004 g abbiendi _ et al _ eur j c 35 1 2004 j dunkley _ et al _ wmap collaboration astrophys j suppl 180 306 2009 arxiv 0803 0586 astro ph u ellwanger _ et al _ 02 066 2005 g belanger f boudjema a pukhov and a semenov comput commun 174 577 2006 comput phys commun 176 367 2007 g belanger f boudjema c hugonie a pukhov and a semenov jcap 0509 001 2005 it is well known that the classical magnetoresistance mr in metals or semiconductors with a closed free electron fermi surface increases quadratically with increasing magnetic field xmath2 for xmath3 and saturates when xmath4 here xmath5 is the zero magnetic field mobility hence the extraordinarily high and linear mr lmr which breaks this familiar 
rule, has been attracting much attention ever since its discovery in the past decade. Such unexpected LMR has been reported in silver chalcogenides xcite, indium antimonide xcite, silicon xcite, MnAs-GaAs composite material xcite, and graphene xcite. Kapitza's linear law xcite states that a metal shows a magnetoresistance linear in a perpendicular magnetic field when it has an open Fermi surface and a mean free path longer than the electronic Larmor radius. Recently, two further models, which do not require an open Fermi surface, have been constructed to provide possible mechanisms for the LMR phenomenon. Abrikosov suggested a quantum-limit origin of LMR for a homogeneous system with a gapless linear energy spectrum xcite; his model requires that the Landau levels be well formed and that the carrier concentration be small enough that all electrons occupy only the lowest Landau band. Alternatively, Parish and Littlewood developed a classical model that does not involve a linear spectrum xcite: ignoring the concrete microscopic mechanism, they attributed this unusual MR to mobility fluctuations in a strongly inhomogeneous system.
Topological insulators (TIs) xcite are novel materials with a full energy gap in the bulk but gapless surface states. Owing to their unique band structure, with only one helical Dirac cone and linear energy dispersion xcite, the surface states of the TI Bi xmath0 Se xmath1 provide an excellent platform for the study of quantum-limit LMR. A recent experiment on this flat-surface system, however, reported a large positive MR, which becomes very linear above a characteristic field of xmath6 xmath7 xmath8 T, even in the opposite situation, where the carrier sheet density is high enough that the electrons occupy more than one Landau level xcite. Moreover, raising the temperature up to room temperature was found to have almost no influence on the observed LMR. Strikingly, this observation conflicts both with Abrikosov's model and with the classical Parish-Littlewood model; so far, a reliable theoretical scheme capable of explaining this novel experiment has been lacking.
In this paper we generalize the balance-equation approach xcite to a system modeling the surface states of a three-dimensional TI, in order to investigate its two-dimensional magnetotransport. We find that a positive, nonsaturating and dominantly linear magnetoresistance can appear over quite a wide magnetic-field range in a TI surface state having a positive and finite effective g-factor. This linear magnetoresistance shows up in systems of high carrier concentration and low mobility, where the electrons are in extended states spread over many smeared Landau levels, and it persists up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance in topological-insulator Bi xmath0 Se xmath1 nanoribbons xcite.
We consider the surface state of a Bi xmath0 Se xmath1-type large-bulk-gap TI in the xmath9-xmath10 plane, under the influence of a uniform magnetic field xmath11 applied along the xmath12 direction xcite. Following the experimental observation xcite, we assume that the Fermi energy lies in the gap of the bulk band, above the Dirac point, i.e. the surface carriers are electrons, and further that the separations of the Fermi energy from both the bottom of the bulk band and the Dirac point are much larger than the highest temperature xmath13 considered in this work. Hence the contribution of the bulk band to the magnetotransport is negligible. These electrons, scattered by randomly distributed impurities and by phonons, are
driven by a uniform in-plane electric field xmath14 in the topological surface. The Hamiltonian of this many-electron-phonon system consists of an electron part xmath15, a phonon part xmath16, and the electron-impurity and electron-phonon interactions xmath17 and xmath18: xmath19. Here the electron Hamiltonian is taken in the form xmath20, in which xmath21, xmath22, xmath23 and xmath24 stand, respectively, for the canonical momentum, coordinate, momentum and spin operators of the xmath25-th electron of charge xmath26; xmath27 is the vector potential of the perpendicular magnetic field xmath28 in the Landau gauge; xmath29 is the Fermi velocity; xmath30 is the effective g-factor of the surface electrons; and xmath31 is the Bohr magneton, with xmath32 the free-electron mass. The sum index xmath25 in eq. (helectron) runs over all electrons, of total number xmath33, in the surface state of unit area.
In the framework of the balance-equation approach xcite, the two-dimensional center-of-mass (c.m.) momentum and coordinate, xmath34 and xmath35, and the relative-electron momenta and coordinates, xmath36 and xmath37, are introduced to write the Hamiltonian xmath15 as the sum of a single-particle c.m. part xmath38 and a many-particle relative-electron part xmath39: xmath40, with xmath41. Here xmath42 is the canonical momentum of the center of mass and xmath43 is the canonical momentum of the xmath25-th relative electron. We have also introduced the c.m. spin operators xmath44 and xmath45. The commutators between the c.m. spin operators xmath46 and xmath47 and the spin operators xmath48, xmath49 and xmath50 of the xmath25-th electron are of order xmath51: xmath52 $= \frac{2}{N}\,\mathrm{i}\,\varepsilon_{\beta_1\beta_2\beta_3}\,\sigma_j^{\beta_3}$, with xmath53. Therefore, for a macroscopically large-xmath33 system, the c.m. part xmath38 actually commutes with the relative-electron part xmath54 of the Hamiltonian, i.e. the c.m. motion and the relative motion of the electrons are truly separated from each other. The couplings between the two emerge only through the electron-impurity and electron-phonon interactions. Furthermore, the electric field xmath55 appears only in xmath38, and in view of xmath56 $= \mathrm{i}\,\delta_{\alpha\beta}\,\delta_{ij}\,(1-1/N) \simeq \mathrm{i}\,\delta_{\alpha\beta}\,\delta_{ij}$, i.e. the relative-electron momenta and coordinates can be treated as canonical conjugate variables, the relative-motion part xmath54 is just the Hamiltonian of xmath33 electrons in the TI surface state in the magnetic field, without the electric field. In terms of the c.m. coordinate xmath57 and the relative-electron density operator xmath58, the electron-impurity and electron-phonon interactions can be written as xcite xmath59. Here xmath60 and xmath61 are, respectively, the potential of an impurity at a randomly distributed position xmath62 and the electron-phonon coupling matrix element in the plane-wave representation, and xmath63, with xmath64 and xmath65 being the creation and annihilation operators for a phonon of wavevector xmath66 in branch xmath67 with frequency xmath68. The velocity operator xmath69 is the time derivative of the coordinate: xmath70 $= v_{\mathrm{F}}\,(\sigma_{\mathrm{c}y}\,\hat{i} - \sigma_{\mathrm{c}x}\,\hat{j})$. To derive a force-balance equation for steady-state transport we consider the Heisenberg equation for the rate of change of the c.m. canonical momentum xmath71: xmath72 $= - N e\,\bm{v}\times\bm{B} - N e\,\bm{E} + \bm{F}_{\mathrm{i}} + \bm{F}_{\mathrm{p}}$, in which the frictional forces xmath73 and xmath74 have the same expressions as in ref. xcite. The statistical average of this operator equation can be determined to
linear order in the electron-impurity and electron-phonon interactions xmath17 and xmath18, with the initial density matrix xmath75 at temperature xmath76, when the in-plane electric field xmath77 is not strong. For steady transport states we have xmath78, leading to a force-balance equation of the form xmath79. Here xmath80, the statistically averaged velocity of the moving center of mass, is identified with the average rate of change of its position, i.e. the drift velocity of the electron system driven by the electric field xmath77, and xmath81 and xmath82 are the frictional forces experienced by the center of mass due to impurity and phonon scattering: xmath83. In this expression xmath84 is the Bose distribution function, xmath85, and xmath86 stands for the imaginary part of the Fourier spectrum of the relative-electron density correlation function, defined by xmath87, where xmath88 and xmath89 denotes statistical averaging over the initial density matrix xmath90 xcite. The force-balance equation describes steady-state two-dimensional magnetotransport in the surface state of a TI. Note that the frictional forces xmath81 and xmath82 point opposite to the drift velocity xmath91, and that their magnitudes are functions of xmath92 only. With the drift velocity xmath93 in the xmath9 direction, the force-balance equation yields a transverse resistivity xmath94 and a longitudinal resistivity xmath95; the linear one takes the form xmath96.
To calculate the electron density correlation function xmath97 we proceed in the Landau representation xcite. The Landau levels of the single-particle Hamiltonian xmath98 of the relative-electron system, in the absence of the electric field, consist of a positive branch xmath99 and a negative branch xmath100 xcite, xmath101 with xmath102 and xmath103, and a zero (xmath104) level, xmath105. The corresponding Landau wavefunctions are xmath106 and xmath107 for xmath108, and xmath109 for xmath104. Here xmath110 is the wavevector of the system along the xmath9 direction; xmath111 with xmath112; and xmath113 is the harmonic-oscillator eigenfunction, with xmath114 being the Hermite polynomial, xmath115 and xmath116. Each Landau level contains xmath117 electron states for a system of unit surface area. The positive branch xmath118 and the xmath104 level xmath119 of the above energy spectrum are indeed quite close to the surface-state levels in the bulk gap of the Bi xmath0 Se xmath1 family of materials derived from microscopic band calculations xcite.
The Landau levels are broadened by impurity, phonon and electron-electron scattering. We model the imaginary part of the retarded Green's function, or the density of states, of a broadened Landau level xmath120 (written for the two branches and the xmath104 level) with a Gaussian form xcite, xmath121, with a half-width xmath122 of the form xcite xmath123. Here xmath124 is the single-particle lifetime and xmath125 is the cyclotron frequency of the linear-energy-dispersion system, with xmath126 being the zero-temperature Fermi level. Using a semi-empirical parameter xmath127 to relate xmath124 to the transport scattering time xmath128, and expressing xmath129 through the zero-field mobility xmath5 at finite temperature xcite, we can write the Landau-level broadening as xmath130.
In the present study we consider the case of xmath120 doping, i.e. the Fermi level lies high enough above the energy zero of the Dirac cone to be in the range of the positive-branch levels, while the states of the xmath100 branch levels are completely filled, so that they are irrelevant to electron transport.
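To make the level structure concrete, here is a small numerical sketch of the Landau spectrum and its Gaussian broadening as described above. The dispersion follows the standard form for a Dirac surface state with Zeeman splitting, eps_n = +/- sqrt(2*e*hbar*v_F^2*n*B + (g*mu_B*B/2)^2) with eps_0 = -g*mu_B*B/2 for positive g; the parameter values are illustrative, not the paper's:

import numpy as np

# Illustrative parameters (not the paper's values).
HBAR = 1.054571817e-34   # J s
E_CH = 1.602176634e-19   # C
MU_B = 9.2740100783e-24  # J/T
V_F = 5.0e5              # Fermi velocity, m/s
G_EFF = 20.0             # effective g-factor (assumed positive)
B = 5.0                  # magnetic field, T

def landau_levels(n_max: int) -> np.ndarray:
    """Dirac-cone Landau levels with Zeeman term: +/- branches plus the n = 0 level."""
    n = np.arange(1, n_max + 1)
    eps_n = np.sqrt(2.0 * E_CH * HBAR * V_F**2 * n * B + (0.5 * G_EFF * MU_B * B) ** 2)
    eps0 = -0.5 * G_EFF * MU_B * B  # zero level shifts down to negative energy for g > 0
    return np.concatenate(([eps0], eps_n, -eps_n))

def broadened_dos(eps: np.ndarray, levels: np.ndarray, gamma: float) -> np.ndarray:
    """Gaussian-broadened density of states with half-width gamma (unnormalized)."""
    return np.exp(-(((eps[:, None] - levels[None, :]) / gamma) ** 2)).sum(axis=1)

lv = landau_levels(30)
grid = np.linspace(-0.1, 0.1, 2001) * E_CH          # energy grid, +/- 0.1 eV
dos = broadened_dos(grid, lv, gamma=5e-22)          # gamma of a few meV, illustrative
print("zero level and first +/- levels (meV):",
      lv[0] / E_CH * 1e3, lv[1] / E_CH * 1e3, lv[31] / E_CH * 1e3)

When gamma exceeds the spacing between adjacent levels, the summed DOS above becomes nearly featureless, which is exactly the "overlapped Landau level" regime the text identifies with the linear magnetoresistance.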
Special attention has to be paid to the xmath104 level, since, depending on the direction of the exchange potential, the effective g-factor xmath30 of a TI surface state can be positive, zero or negative xcite. The sign and magnitude of the effective g-factor determine how many states of the zero level should be included in, or excluded from, the states available for electron occupation in the case of xmath120 doping at a given magnetic field. (i) If xmath131, the xmath104 level center is exactly at xmath132 and the system is electron-hole symmetric. The total number of negative-energy states (the states of the lower half of the xmath104 level plus those of the xmath100 branch levels) and the total number of positive-energy states (the states of the upper half of the xmath104 level plus those of the xmath99 branch levels) do not change with the magnetic field. Therefore the lower-half, negative-energy states of this level are always filled, while its upper-half, positive-energy states are available for particle occupation; the latter are counted as electrons participating in transport in the case of xmath120 doping. (ii) For a finite positive xmath133, the xmath104 level xmath134 moves down to negative energy, and at finite magnetic field strength xmath2 its distance xmath135 to the nearest xmath100 branch level is smaller than its distance to the nearest positive-branch level. This is equivalent to the opening of an energy gap, increasingly enlarged with increasing xmath2, between the positive-branch states and the states of the zero level plus the xmath100 branch levels. The opening of a sufficient energy gap implies that, with increasing magnetic field, the states in the positive-branch levels no longer shrink into the zero level; thus, once the magnetic field xmath2 exceeds a certain value (depending on the magnitude of xmath30), the xmath104 level should be completely excluded from the conduction band, i.e. only the particles occupying the positive-branch states are counted as electrons participating in transport in the case of xmath120 doping. (iii) For a finite negative xmath136, the xmath104 level xmath134 moves up to positive energy, and an increasingly enlarged energy gap opens between the states of the zero level plus the positive branch and the states of the xmath100 branch levels; then, once the magnetic field xmath2 exceeds a certain value, the particles occupying the xmath104 level and the positive-branch states are the electrons participating in transport.
As a result, the experimentally accessible sheet density xmath33 of the electrons participating in transport is related to the Fermi energy xmath137 by the following equation, valid at finite xmath30 for magnetic fields xmath2 larger than a certain value: xmath138, in which xmath139 is the Fermi distribution function at temperature xmath76 and the summation index xmath120 runs over xmath140 for xmath133, or over xmath141 for xmath136. In the case of xmath131, xmath142, valid for arbitrary magnetic field, in which xmath143.
The imaginary part of the relative-electron density correlation function in the presence of a magnetic field, xmath86, can be expressed in the Landau representation as xcite xmath144, in which the transform factor is xmath145, with xmath146, xmath147, xmath148 and xmath149 being associated Laguerre polynomials. The Landau-representation correlation function xmath150 in eq. (piqw) can be constructed from the imaginary part of the retarded Green's function xmath151, i.e. the density of states of the xmath120-th Landau level, as xcite xmath152, a convolution of the factors $\mathrm{Im}\,G_n(\varepsilon+\omega)$ and $\mathrm{Im}\,G_{n'}(\varepsilon)$.
The summation indices xmath120 and xmath153 in eq. (piqw) run over xmath140 for xmath133, or over xmath154 for xmath136. In the case of xmath131, eq. (piqw) still works, with the summation indices xmath120 and xmath153 running over xmath154, but with xmath155 replaced by xmath156 in eq. (p2nn).
Numerical calculations have been performed for the magnetoresistivity xmath157 of the surface state in a uniform TI Bi xmath0 Se xmath1. At zero temperature the elastic scattering contributing to the resistivity is modeled by a Coulomb potential due to charged impurities xcite, xmath158, with xmath159 the impurity density, which is determined by the zero-magnetic-field mobility xmath5. At temperatures higher than xmath160 xcite, phonon scattering plays an increasingly important role, and the dominant inelastic contribution comes from optical phonons. For this polar material, scattering by optical phonons via the deformation potential can be neglected; hence we take into account inelastic scattering from optical phonons via the Fröhlich coupling xmath161. In the numerical calculation we use the following parameters xcite: Fermi velocity xmath162, static dielectric constant xmath163, optical dielectric constant xmath164, and phonon energy xmath165. The broadening parameter is taken to be xmath166.
[Figure: magnetoresistivity as a function of the magnetic field xmath2 for different effective g-factors xmath167 and xmath168, for a TI surface system with electron sheet density xmath169, in the cases of zero-magnetic-field mobility xmath170 (a) and xmath171 (b); several integer positions of the filling factor xmath172 are marked in (b).]
Fig. (diffg) shows the calculated magnetoresistivity xmath157 versus the magnetic field strength xmath2 for a TI surface system with electron sheet density xmath169 but different effective g-factors, xmath167 and xmath168, for two values of the zero-magnetic-field mobility, xmath170 and xmath171, representing different degrees of Landau-level broadening. In the case without Zeeman splitting (xmath131), the resistivity xmath157 exhibits almost no change with the magnetic field up to 10 T, except for the Shubnikov-de Haas (SdH) oscillation that shows up in the case of xmath171. This kind of magnetoresistance behavior was indeed seen experimentally in the electron-hole symmetric massless system of single-layer graphene xcite. In the case of a positive g-factor, xmath173, the magnetoresistivity increases linearly with increasing magnetic field, while for a negative g-factor, xmath174, it decreases linearly with increasing magnetic field.
[Figure: magnetoresistivity as a function of the magnetic field xmath2 for different values of the zero-magnetic-field mobility: (a) xmath175, (b) xmath176, (c) xmath177, (d) xmath178, (e) xmath179 and (f) xmath180. The inset of (a) shows the same for a larger magnetic-field range xmath181. The filling factor xmath182 is plotted versus the magnetic field in (f), and several integer positions of xmath182 are marked in (d) and (e); the surface electron density is xmath169 and the lattice temperature xmath183.]
In the following we examine in more detail the linearly increasing magnetoresistance of the positive-xmath30 case. Fig. (rhob) shows the calculated resistivity xmath157 versus the magnetic field strength xmath2 at lattice temperature xmath183 for a system of carrier sheet density xmath169 and xmath173, with different zero-field mobilities xmath184 and xmath180. All resistivity curves for mobilities xmath185 exhibit clear linearity over this magnetic-field range and show no tendency toward
saturation at the highest field shown in the figure; in particular, for the case xmath170 the linear behavior extends even up to a magnetic field of xmath186, as illustrated in the inset of fig. (rhob)(a). This feature contradicts the classical MR, which saturates at sufficiently large magnetic field, xmath187.
Note that here we only present the calculated xmath157 for magnetic fields xmath2 larger than xmath188 T, for which a sufficient energy gap xmath135 is assumed to have opened, so that with further increase of the magnetic field the states in the positive-branch levels no longer shrink into the zero level, and the latter can be excluded from the conduction band. This is of course not true for very weak magnetic fields: when xmath189, the energy gap xmath190, and the situation becomes similar to the xmath131 case, where the whole upper half of the zero-level states is available for electron occupation and the resistivity xmath157 should stay flat as the magnetic field changes. With increasing xmath2, the portion of the zero-level states available to the conduction electrons decreases until the magnetic field reaches xmath191. As a result, the resistivity xmath157 should exhibit a crossover from flat behavior at small xmath2 to a positive linear increase at xmath192. This is just the behavior observed in the TI Bi xmath0 Se xmath1 xcite.
Note that in the case of xmath170 the broadened Landau-level widths are always larger than the neighboring level interval, xmath193, which requires xmath194 even for the lowest Landau level xmath195, i.e. the whole Landau-level spectrum is smeared. With increasing zero-field mobility the magnitude of the resistivity xmath157 decreases, and once the broadened Landau-level width becomes smaller than the neighboring level interval, xmath196, a weak SdH oscillation begins to occur around the linearly increasing average value of xmath157 in the higher portion of the magnetic-field range, as seen in fig. (rhob)(c), (d) and (e) for xmath197 and xmath198. On the other hand, in the case of large mobility, e.g. xmath199, where the broadened Landau-level widths xmath200 are much smaller than the neighboring level interval even for level indices xmath120 as large as xmath201, the magnetoresistivity shows a pronounced SdH oscillation, and the linear behavior disappears before the appearance of the quantum Hall effect xcite, as shown in fig. (rhob)(f).
Abrikosov's model for the LMR requires the applied magnetic field to be large enough to reach the quantum limit, at which all carriers are within the lowest Landau level xcite, whereas more than one Landau level is evidently occupied in the experimental samples in the field range where the linear, non-saturating magnetoresistivity was observed xcite. For the given electron surface density xmath202, the number of occupied Landau levels, i.e. the filling factor xmath172, at different magnetic fields is shown in fig. (rhob)(f), as well as in fig. (rhob)(d) and (e), where the integer positions of xmath203, i.e. filling up entire xmath182 Landau levels, coincide with the minima of the density of states, or the dips of the SdH oscillation. This is in contrast with the xmath131 case, where an integer value of xmath203, which implies filling up to the center of the xmath182-th Landau level, sits at a peak of the SdH oscillation, as shown in fig. (diffg)(b). The SdH oscillations observed in the Bi xmath0 Se xmath1 nanoribbon exhibiting non-saturating surface LMR in the experiment xcite favor the former case, i.e. a finite positive effective g-factor xmath133.
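As an aside, the filling factor used above follows from simple arithmetic: with one state per level per flux quantum for a single Dirac cone, nu = n_s * h / (e * B). A one-line check in Python (the density and field below are illustrative, not the paper's):

# Filling factor nu = n_s * h / (e * B): number of occupied Landau levels
# for sheet density n_s in a field B. Values below are illustrative only.
H_PLANCK = 6.62607015e-34   # J s
E_CH = 1.602176634e-19      # C

def filling_factor(n_s: float, b_field: float) -> float:
    return n_s * H_PLANCK / (E_CH * b_field)

print(filling_factor(n_s=5e16, b_field=5.0))   # ~41 levels occupied at 5 T

A value this far above unity illustrates why the quantum-limit (lowest-level-only) mechanism cannot apply to the experimental samples.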
[Figure: magnetoresistivity plotted as a function of the surface electron density xmath33 at magnetic field xmath204, (a) for different values of the zero-field mobility xmath5, and (b) for different values of the zero-field conductivity xmath205.]
[Figure: magnetoresistivity at various lattice temperatures; the zero-magnetic-field mobility at zero temperature is xmath206.]
Next we examine the density dependence of the linear magnetoresistivity, to compare with Abrikosov's quantum magnetoresistance, which suggests a xmath207 behavior xcite. We show the calculated xmath208 for the above LMR versus the carrier sheet density xmath33 in fig. (rhon), at fixed magnetic field xmath209 T. The mobility is taken to be xmath210 and xmath211 m²/V s, respectively, to keep the resistivity in the LMR regime. A clearly linear dependence of xmath213 on the surface density xmath33 is seen in all cases, indicating that this non-saturating linear resistivity is almost inversely proportional to the carrier density. In the figure we also show xmath208 versus xmath33 under the condition of fixed conductivity, xmath214 and xmath215; in this case the half-width xmath216 is independent of the surface density. The linear dependence still holds, indicating that this linear behavior is not sensitive to the modest xmath33-dependence of the Landau-level broadening xmath216, as long as the system stays in the overlapped-Landau-level regime.
From the above discussion it is obvious that the LMR shows up in systems having overlapped Landau levels, and that the separation of the Landau levels makes the MR depart from the linear increase. At high temperature the thermal energy smears the level separation, and phonon scattering further broadens the Landau levels; hence one expects this LMR to be robust against raising the temperature. This is indeed the case, as seen in fig. (rhot), where we plot the calculated magnetoresistivity xmath157 for the above system, with zero-temperature linear mobility xmath217 m²/V s, versus the magnetic field at different lattice temperatures. We can see that raising the temperature up to room temperature has little effect on the linearity of the MR; due to the decreased mobility at higher temperature from phonon scattering, the weak SdH oscillation on the linear background tends to vanish. These features are in good agreement with the experimental report xcite.
In summary, we have studied the two-dimensional magnetotransport in the flat surface of a three-dimensional TI, arising from the surface states with wavevector-linear energy dispersion and a finite, positive Zeeman splitting within the bulk energy gap. When the level broadening is comparable to or larger than the Landau-level separation, so that the conduction electrons spread over many Landau levels, a positive, dominantly linear and non-saturating magnetoresistance appears within quite a wide range of magnetic field and persists up to room temperature. This remarkable LMR provides a possible mechanism for the recently observed linear magnetoresistance in topological-insulator Bi xmath0 Se xmath1 nanoribbons xcite.
In contrast to the quantum Hall effect, which appears in the case of well-formed Landau levels, and to Abrikosov's quantum magnetotransport xcite, which is limited to the extreme quantum limit in which all electrons coalesce into the lowest Landau level, the LMR discussed here is a phenomenon of purely classical two-dimensional magnetotransport in a system having linear energy dispersion; it appears in the regime of overlapped Landau levels, irrespective of its showing up at relatively high magnetic fields. Furthermore, the present scheme deals with the spatially uniform case, without invoking the
mobility fluctuations in a strongly inhomogeneous system that the classical Parish-Littlewood model requires to produce a LMR xcite. The appearance of this significant, positively increasing linear magnetoresistance depends on the existence of a positive and sizable effective g-factor. If the Zeeman energy splitting is quite small, the resistivity xmath157 exhibits little change with the magnetic field; in the case of a negative and sizable effective g-factor, the magnetoresistivity decreases linearly with increasing magnetic field. Therefore the behavior of the longitudinal resistivity versus magnetic field may provide a useful way of judging the direction and the size of the effective Zeeman energy splitting in TI surface states.
This work was supported by the National Science Foundation of China (grant no. 11104002), the National Basic Research Program of China (grant no. 2012CB927403), and the Program for Science & Technology Innovation Talents in Universities of Henan Province (grant no. 2012HASTIT029).
Notes for the common tests below: (i) run a first forward pass; create hypothetical next tokens and extend them into next_input_ids; append to next input_ids and select a random slice; test that the outputs are equal for the slice. (ii) create an attention mask; run a first forward pass; create hypothetical next tokens and extend them into next_input_ids; change a random masked slice of input_ids; append to next input_ids and attn_mask; get two different outputs; select a random slice; test that the outputs are equal for the slice. BigBird has extremely high logits, which requires such a high error tolerance here. The decoder cannot keep gradients, and it is not used enough to be worth fixing.
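The first block of notes above describes a key-value-cache consistency check: run the model once with use_cache=True, feed only the new tokens together with past_key_values, and verify that the outputs match a full no-cache forward pass on a random slice. A minimal, model-agnostic sketch of that pattern (the model handle and tolerance are illustrative; the real tests below apply it to BigBirdPegasus):

import torch

def check_past_equivalence(model, input_ids, new_tokens, atol=1e-3):
    """Sketch: outputs with past_key_values must match a full forward pass."""
    # first forward pass, caching the keys/values
    out = model(input_ids, use_cache=True)
    past = out.past_key_values

    # full sequence without cache vs. only the new tokens with cache
    full = torch.cat([input_ids, new_tokens], dim=-1)
    no_past = model(full)["last_hidden_state"]
    with_past = model(new_tokens, past_key_values=past)["last_hidden_state"]

    # compare the freshly generated positions on one random feature index
    idx = torch.randint(no_past.shape[-1], (1,)).item()
    assert torch.allclose(
        no_past[:, -new_tokens.shape[1]:, idx], with_past[:, :, idx], atol=atol
    )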
import copy
import tempfile
import unittest

from transformers import BigBirdPegasusConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BigBirdPegasusForCausalLM,
        BigBirdPegasusForConditionalGeneration,
        BigBirdPegasusForQuestionAnswering,
        BigBirdPegasusForSequenceClassification,
        BigBirdPegasusModel,
        PegasusTokenizer,
    )
    from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
        BigBirdPegasusDecoder,
        BigBirdPegasusEncoder,
    )

MODEL_ID = "google/bigbird-pegasus-large-pubmed"


def prepare_bigbird_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    # Derive the masks from the padding token when they are not given explicitly.
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    input_dict = {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
    input_dict = {k: input_dict[k].to(torch_device) for k in input_dict}
    return input_dict


class BigBirdPegasusModelTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        seq_length=256,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=31,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=260,
        eos_token_id=1,
        pad_token_id=0,
        bos_token_id=2,
        attention_type="block_sparse",
        use_bias=False,
        block_size=16,
        num_random_blocks=3,
        scale_embedding=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.scale_embedding = scale_embedding

    def prepare_config_and_inputs(self):
        # Clamp to a minimum of 3 so that random tokens never collide with the
        # special ids (pad/eos/bos = 0/1/2); the last position is forced to EOS.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
        input_ids[:, -1] = self.eos_token_id

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_bigbird_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return BigBirdPegasusConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            attention_type=self.attention_type,
            use_bias=self.use_bias,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            scale_embedding=self.scale_embedding,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select a random slice and test that the outputs are equal for it
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        # the encoder saved and reloaded standalone must reproduce the same states
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = BigBirdPegasusEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        # same round trip for the decoder
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = BigBirdPegasusDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)

    def create_and_check_model(self, config, inputs_dict):
        model = BigBirdPegasusModel(config=config).to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        decoder_input_ids = inputs_dict["decoder_input_ids"]
        result = model(input_ids, decoder_input_ids=decoder_input_ids, use_cache=True)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
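# For orientation, a tester like the one above is usually driven from a
# unittest case, roughly as follows. This is an illustrative sketch of the
# common transformers test pattern, not an excerpt from this file:
#
#     class _ExampleUsage(unittest.TestCase):
#         def test_decoder_past(self):
#             tester = BigBirdPegasusModelTester(self)
#             config_and_inputs = tester.prepare_config_and_inputs()
#             tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
#
# The real test class below wires the tester into the shared mixins instead.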
@require_torch
class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            BigBirdPegasusModel,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BigBirdPegasusForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": BigBirdPegasusForConditionalGeneration,
            "feature-extraction": BigBirdPegasusModel,
            "question-answering": BigBirdPegasusForQuestionAnswering,
            "summarization": BigBirdPegasusForConditionalGeneration,
            "text-classification": BigBirdPegasusForSequenceClassification,
            "text-generation": BigBirdPegasusForCausalLM,
            "text2text-generation": BigBirdPegasusForConditionalGeneration,
            "translation": BigBirdPegasusForConditionalGeneration,
            "zero-shot": BigBirdPegasusForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_torchscript = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
            return True
        return False

    def _get_input_ids_and_config(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # generation tests run with full attention on a shortened batch
        config.attention_type = "original_full"
        input_ids = inputs_dict[self.input_name]
        attention_mask = torch.ones_like(input_ids, dtype=torch.long)

        # cut the sequence to half length and keep at most `batch_size` rows
        sequence_length = input_ids.shape[-1] // 2
        input_ids = input_ids[:batch_size, :sequence_length]
        attention_mask = attention_mask[:batch_size, :sequence_length]

        # generate at most 3 new tokens
        max_length = input_ids.shape[-1] + 3
        if config.eos_token_id is not None and config.pad_token_id is None:
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    def setUp(self):
        self.model_tester = BigBirdPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_model_various_attn_type(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["original_full", "block_sparse"]:
            config_and_inputs[0].attention_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_generate_without_input_ids(self):
        if self.model_tester.attention_type == "block_sparse":
            # this test cannot pass for block-sparse attention, since input_ids
            # must be a multiple of block_size
            return
        super().test_generate_without_input_ids()

    def test_retain_grad_hidden_states_attentions(self):
        if self.model_tester.attention_type == "block_sparse":
            # block-sparse attention matrices cannot retain gradients
            return
        super().test_retain_grad_hidden_states_attentions()
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (
            BigBirdPegasusModel,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
        ):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_dict.pop("decoder_attention_mask")
        input_dict.pop("decoder_input_ids")
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(**input_dict)
        model.generate(**input_dict, do_sample=True, early_stopping=False, num_return_sequences=3)

    @slow
    def test_batched_forward_original_full(self):
        self._check_batched_forward(attn_type="original_full")

    @slow
    def test_batched_forward_block_sparse(self):
        self._check_batched_forward(attn_type="block_sparse", tolerance=1e-1)

    def _check_batched_forward(self, attn_type, tolerance=1e-3):
        config, _ = self.model_tester.prepare_config_and_inputs()
        config.max_position_embeddings = 128
        config.block_size = 16
        config.attention_type = attn_type
        model = BigBirdPegasusForConditionalGeneration(config).to(torch_device)
        model.eval()

        chunk_length = 32

        sample_with_padding = [3, 8, 11] * chunk_length + [0] * chunk_length
        sample_without_padding = [4, 7, 9, 13] * chunk_length
        target_ids_without_padding = [2, 3] * 8
        target_ids_with_padding = [7, 8] * 6 + 4 * [-100]

        attention_mask = torch.tensor(
            [[1] * 3 * chunk_length + [0] * chunk_length, [1] * 4 * chunk_length],
            device=torch_device,
            dtype=torch.long,
        )

        input_ids = torch.tensor([sample_with_padding, sample_without_padding], device=torch_device, dtype=torch.long)
        labels = torch.tensor(
            [target_ids_without_padding, target_ids_with_padding], device=torch_device, dtype=torch.long
        )

        with torch.no_grad():
            logits_batched = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).logits

        with torch.no_grad():
            logits_single_first = model(input_ids=input_ids[:1, :-chunk_length], labels=labels[:1]).logits

        self.assertTrue(torch.allclose(logits_batched[0, -3:], logits_single_first[0, -3:], atol=tolerance))

        with torch.no_grad():
            logits_single_second = model(input_ids=input_ids[1:], labels=labels[1:, :-4]).logits

        self.assertTrue(torch.allclose(logits_batched[1, :3], logits_single_second[0, :3], atol=tolerance))

    def test_auto_padding(self):
        ids = [[7, 6, 9] * 65]
        config, _ = self.model_tester.prepare_config_and_inputs()
        input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
        attention_mask = input_ids.new_ones(input_ids.shape)
        decoder_input_ids = torch.tensor([[33, 5, 8] * 3], device=torch_device, dtype=torch.long)

        config.block_size = 8
        model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
        output1 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
            "logits"
        ]

        ids = [[7, 6, 9] * 65 + [0] * 5]
        input_ids = torch.tensor(ids, device=torch_device,
dtype=torch.long) attention_mask = torch.tensor([[1] * 3 * 65 + [0] * 5], device=torch_device, dtype=torch.long) output2 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[ "logits" ] self.assertTrue(torch.allclose(output1, output2, atol=1e-5)) def test_for_change_to_full_attn(self): self.model_tester.seq_length = 9 config, input_dict = self.model_tester.prepare_config_and_inputs() config.attention_type = "block_sparse" model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device) state_dict = model.state_dict() outputs1 = model(**input_dict)["logits"] config.attention_type = "original_full" model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device) model.load_state_dict(state_dict) outputs2 = model(**input_dict)["logits"] self.assertTrue(torch.allclose(outputs1, outputs2, atol=1e-5)) @require_torch @require_sentencepiece @require_tokenizers @slow class BigBirdPegasusModelIntegrationTests(unittest.TestCase): def _get_dummy_input_ids(self): ids = torch.tensor( [[685, 560, 630, 193, 836, 764, 708, 360, 10, 724, 278, 755, 805, 600, 71, 473, 601, 397, 315, 706, 487, 552, 88, 175, 601, 850, 678, 538, 846, 73, 778, 917, 116, 977, 756, 710, 1023, 848, 432, 449, 851, 100, 985, 178, 756, 798, 660, 148, 911, 424, 289, 962, 266, 698, 640, 545, 544, 715, 245, 152, 676, 511, 460, 883, 184, 29, 803, 129, 129, 933, 54, 902, 551, 489, 757, 274, 336, 389, 618, 43, 443, 544, 889, 258, 322, 1000, 938, 58, 292, 871, 120, 780, 431, 83, 92, 897, 399, 612, 566, 909, 634, 939, 85, 204, 325, 775, 965, 48, 640, 1013, 132, 973, 869, 181, 1001, 847, 144, 661, 228, 955, 792, 720, 910, 374, 854, 561, 306, 582, 170, 676, 449, 96, 198, 607, 257, 882, 691, 293, 931, 817, 862, 388, 611, 555, 974, 369, 1000, 918, 202, 384, 513, 907, 371, 556, 955, 384, 24, 700, 131, 378, 99, 575, 932, 735, 124, 964, 595, 943, 740, 149, 210, 563, 412, 783, 42, 59, 706, 37, 779, 87, 44, 873, 12, 771, 308, 81, 33, 183, 129, 807, 276, 175, 555, 372, 185, 445, 489, 590, 287, 281, 638, 771, 516, 95, 227, 876, 270, 881, 297, 329, 20, 608, 841, 411, 451, 249, 181, 324, 1005, 830, 783, 865, 261, 964, 750, 140, 1021, 599, 462, 890, 622, 844, 697, 529, 153, 926, 150, 111, 26, 465, 957, 890, 887, 118, 446, 596, 674, 873, 929, 229, 508, 764, 122, 327, 470, 288, 526, 840, 697, 153, 592, 42, 275, 553, 439, 208, 780, 167, 112, 350, 1018, 130, 736, 887, 813, 217, 382, 25, 68, 979, 1008, 772, 235, 717, 999, 292, 727, 1023, 702, 710, 728, 556, 33, 12, 617, 213, 139, 695, 1004, 422, 638, 669, 624, 489, 771, 540, 980, 218, 664, 822, 308, 175, 149, 950, 542, 580, 548, 808, 394, 74, 298, 920, 900, 815, 731, 947, 877, 772, 800, 778, 395, 540, 430, 200, 424, 62, 342, 866, 45, 803, 931, 89, 34, 646, 233, 768, 37, 769, 460, 291, 198, 895, 950, 255, 81, 447, 137, 190, 130, 210, 369, 292, 377, 348, 169, 885, 805, 177, 538, 324, 872, 509, 804, 115, 799, 30, 754, 290, 147, 274, 222, 341, 510, 515, 70, 358, 909, 557, 886, 766, 323, 624, 92, 342, 424, 552, 972, 663, 415, 658, 711, 968, 275, 861, 44, 84, 434, 810, 94, 175, 406, 202, 858, 499, 481, 988, 330, 541, 1004, 210, 618, 955, 897, 983, 576, 17, 107, 165, 607, 537, 629, 192, 196, 308, 137, 953, 860, 94, 892, 751, 88, 161, 148, 585, 456, 88, 14, 315, 594, 121, 885, 952, 833, 716, 733, 933, 282, 801, 427, 783, 471, 285, 277, 979, 325, 535, 228, 891, 596, 648, 969, 574, 654, 518, 257, 137, 208, 464, 950, 140, 5, 424, 349, 942, 283, 587, 821, 1007, 434, 220, 820, 740, 874, 787, 374, 291, 564, 671, 438, 827, 940, 824, 509, 1021, 787, 942, 
856, 450, 327, 491, 54, 817, 95, 60, 337, 667, 637, 164, 571, 946, 107, 202, 301, 782, 890, 839, 551, 680, 649, 14, 1017, 904, 721, 1017, 535, 505, 848, 986, 777, 740, 775, 210, 456, 469, 474, 963, 573, 401, 57, 883, 750, 664, 281, 5, 613, 1005, 306, 344, 543, 567, 154, 789, 354, 358, 698, 408, 412, 30, 930, 372, 822, 632, 948, 855, 503, 8, 618, 1010, 138, 695, 897, 852, 377, 933, 722, 149, 886, 1009, 260, 127, 811, 578, 533, 805, 325, 977, 113, 944, 651, 238, 361, 991, 860, 556, 64, 928, 917, 455, 266, 445, 604, 624, 420, 340, 845, 275, 370, 843, 227, 226, 940, 644, 909, 229, 827, 898, 370, 129, 808, 25, 699, 293, 356, 838, 135, 4, 227, 890, 681, 445, 418, 285, 837, 27, 737, 249, 366, 948, 202, 438, 198, 930, 648, 638, 607, 73, 247, 853, 136, 708, 214, 476, 621, 324, 103, 853, 328, 596, 224, 257, 646, 348, 108, 927, 970, 980, 520, 150, 998, 477, 393, 684, 559, 1, 361, 692, 551, 90, 75, 500, 739, 636, 344, 97, 852, 283, 719, 33, 116, 455, 866, 429, 828, 826, 691, 174, 746, 133, 442, 94, 348, 402, 420, 707, 405, 942, 186, 976, 376, 677, 874, 703, 517, 498, 499, 206, 415, 366, 856, 739, 420, 586, 219, 952, 539, 375, 23, 461, 720, 355, 603, 52, 999, 815, 721, 574, 445, 816, 1019, 105, 641, 395, 972, 910, 328, 607, 519, 686, 246, 415, 528, 170, 167, 310, 940, 595, 392, 221, 834, 682, 835, 115, 861, 335, 742, 220, 247, 101, 416, 222, 179, 509, 175, 606, 627, 674, 781, 737, 746, 849, 67, 457, 1012, 126, 139, 625, 731, 156, 697, 121, 322, 449, 710, 857, 291, 976, 4, 701, 239, 678, 172, 724, 857, 583, 661, 903, 797, 628, 903, 835, 605, 989, 615, 870, 380, 710, 110, 330, 101, 695, 846, 918, 508, 672, 594, 36, 238, 244, 251, 393, 767, 282, 22, 430, 230, 983, 401, 154, 1007, 120, 678, 896, 386, 390, 711, 397, 347, 587, 1020, 951, 79, 831, 585, 200, 814, 134, 560, 700, 171, 452, 139, 755, 314, 476, 346, 388, 126, 719, 851, 198, 699, 901, 18, 710, 448, 351, 665, 644, 326, 425, 165, 571, 178, 440, 665, 674, 915, 866, 463, 754, 136, 950, 748, 47, 497, 1013, 640, 930, 338, 158, 525, 631, 815, 887, 289, 803, 116, 600, 637, 410, 175, 499, 876, 565, 1002, 623, 577, 333, 887, 586, 147, 773, 776, 644, 49, 77, 294, 117, 494, 561, 110, 979, 180, 562, 72, 859, 434, 1007, 286, 516, 75, 597, 491, 322, 888, 533, 209, 43, 499, 29, 411, 856, 181, 305, 963, 615, 778, 259, 373, 877, 746, 858, 381, 886, 613, 91, 69, 618, 523, 13, 617, 226, 422, 168, 929, 379, 290, 923, 100, 218, 307, 345, 211, 789, 735, 669, 585, 275, 410, 921, 552, 235, 636, 285, 665, 659, 708, 173, 724, 302, 823, 1, 139, 708, 903, 732, 868, 442, 967, 916, 163, 51, 243, 871]], dtype=torch.long, device=torch_device, ) return ids def _get_dummy_target_ids(self): ids = torch.tensor( [[13, 6, 1, 4, 12, 4, 8, 10, 4, 6, 3, 5, 8, 7, 9, 9]], dtype=torch.long, device=torch_device, ) return ids def test_inference_block_sparse(self): model = BigBirdPegasusForConditionalGeneration.from_pretrained( MODEL_ID, attention_type="block_sparse", block_size=16, num_random_blocks=3 ) model.to(torch_device) input_ids = self._get_dummy_input_ids() target_ids = self._get_dummy_target_ids() outputs = model(input_ids, labels=target_ids) prediction_logits = outputs.logits self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103))) expected_prediction_logits_slice = torch.tensor( [[1.5118, 5.5227, 4.8125, 1.7603, 8.1704, 3.996, 4.8118, 6.7806, 2.2297, 6.9834, 3.1906, 0.103, 7.1515, 6.3679, 3.1896, 6.3054, 3.9741, 6.3772, 5.0042, -0.6338, 6.7868, 0.592, 0.5363, 1.87, -0.331, -2.4518, 1.8263, 3.1899], [1.5702, 5.8135, 4.6675, 2.3674, 8.9828, 3.7913, 5.4027, 7.6567, 1.9007, 
7.3706, 3.8824, 0.0247, 7.6094, 6.6985, 3.2826, 7.0094, 3.8713, 5.6555, 5.0439, -0.3519, 7.1525, 0.4062, -0.2419, 2.2194, -0.6447, -2.9614, 2.0713, 3.248], [1.4527, 5.6003, 4.5381, 2.6382, 9.2809, 3.2969, 5.6811, 8.4011, 1.6909, 7.4937, 4.3185, -0.0878, 7.61, 6.6822, 3.4753, 7.3962, 3.5336, 4.9216, 4.943, -0.2043, 7.3326, 0.2199, -0.6016, 2.4367, -0.7043, -3.0689, 2.3215, 3.0611], [1.1084, 5.6308, 4.4886, 2.717, 9.4103, 3.0733, 5.5825, 8.4325, 1.3075, 7.5495, 4.4782, -0.1092, 7.8115, 6.6285, 3.5311, 7.6853, 3.509, 4.4994, 4.9224, -0.1384, 7.3069, -0.0473, -0.8578, 2.4632, -0.5249, -3.4627, 2.2671, 2.8818]], device=torch_device, ) self.assertTrue( torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4) ) def test_inference_full_attn(self): model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID, attention_type="original_full") model.to(torch_device) input_ids = self._get_dummy_input_ids() target_ids = self._get_dummy_target_ids() outputs = model(input_ids, labels=target_ids) prediction_logits = outputs.logits self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103))) expected_prediction_logits_slice = torch.tensor( [[1.3418, 5.8304, 6.5662, 2.0448, 8.7702, 4.6579, 4.9947, 6.429, 2.4296, 7.9431, 4.217, 0.0672, 7.334, 5.1966, 2.9603, 6.0814, 4.6756, 7.5522, 5.076, 0.213, 6.6638, 0.6577, 0.244, 2.1221, 0.7531, -2.4076, 1.8731, 3.5594], [1.5525, 6.0524, 6.309, 2.6245, 9.229, 4.5213, 5.0913, 7.0622, 1.7992, 8.0962, 4.7994, -0.0248, 7.7168, 5.5878, 3.0883, 6.5248, 4.7895, 6.9974, 4.8787, 0.5445, 6.6686, 0.0102, -0.1659, 2.6195, 0.7389, -2.8956, 1.9928, 3.3777], [1.6407, 6.2104, 6.0331, 2.8076, 9.4074, 3.9772, 5.0574, 7.5316, 1.4201, 8.3035, 5.0212, -0.1031, 7.553, 5.5023, 3.1427, 6.7674, 4.4409, 6.457, 4.525, 0.728, 6.5422, -0.6234, -0.4726, 2.7486, 0.6985, -3.0804, 1.9669, 3.2365], [1.5065, 6.1271, 5.8296, 2.8405, 9.5649, 3.6834, 5.1214, 7.546, 0.9758, 8.3335, 5.1952, -0.1395, 7.4348, 5.6893, 3.2942, 7.0356, 4.1665, 5.9695, 4.3898, 0.8931, 6.3988, -0.8957, -0.7522, 2.8924, 0.6498, -3.4358, 1.8654, 2.9735]], device=torch_device, ) self.assertTrue( torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4) ) def test_seq_to_seq_generation(self): MODEL_ID = "google/bigbird-pegasus-large-arxiv" model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID).to(torch_device) tokenizer = PegasusTokenizer.from_pretrained(MODEL_ID) ARTICLE_LEP = r ARTICLE_MAGNET = r inputs = tokenizer( [ARTICLE_LEP, ARTICLE_MAGNET], max_length=1024, padding="max_length", truncation=True, return_tensors="pt", ) inputs = {k: inputs[k].to(torch_device) for k in inputs} hypotheses_batch = model.generate(**inputs) EXPECTED_LEP = ( "we study the rare decays @xmath0 ( @xmath1 ) at the gigaz option of the international linear collider " "( ilc ).<n> we calculate the branching ratios of @xmath2 in the two higgs doublet model ( 2hdm ), the " "minimal supersymmetric standard model ( mssm ), the next - to - minimal supersymmetric standard model " "( nmssm ) and the nearly minimal supersymmetric standard model ( nmssm ).<n> we find that the branching " "ratios of @xmath3 can reach @xmath4 in 2hdm, @xmath5 in mssm, @xmath6 in nmssm and @xmath7 in nmssm, " "while they are much smaller than @xmath8 in 2hdm, @xmath9 in mssm, @xmath10 in nmssm and @xmath11 in " "nmssm." 
) EXPECTED_MAGNET = ( "we investigate the two - dimensional magnetotransport in the surface state of a topological insulator " "( ti ).<n> we find that a positive, nonsaturating and dominantly linear magnetoresistance can appear " "within quite wide magnetic - field range in the ti surface state having a positive and finite effective g " "- factor.<n> this linear magnetoresistance shows up in the system of high carrier concentration and low " "mobility when electrons are in extended states and spread over many smeared landau levels, and persists " "up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance " "in topological insulator bi@xmath0se@xmath1 nanoribbons." ) generated = tokenizer.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) self.assertTrue(generated == [EXPECTED_LEP, EXPECTED_MAGNET]) class BigBirdPegasusStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=7, d_model=32, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, attention_type="original_full", use_bias=True, block_size=16, num_random_blocks=3, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 self.attention_type = attention_type self.use_bias = use_bias self.block_size = block_size self.num_random_blocks = num_random_blocks def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BigBirdPegasusConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, attention_type=self.attention_type, 
use_bias=self.use_bias, block_size=self.block_size, num_random_blocks=self.num_random_blocks, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BigBirdPegasusDecoder(config=config).to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BigBirdPegasusDecoder(config=config).to(torch_device).eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=5e-1) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, lm_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class BigBirdPegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BigBirdPegasusDecoder, BigBirdPegasusForCausalLM) if is_torch_available() else () all_generative_model_classes = (BigBirdPegasusForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BigBirdPegasusStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig) def test_config(self): 
self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass
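The decoder tests above (see create_and_check_decoder_model_past) all follow the same pattern: run one full forward pass over an extended sequence, run an incremental pass that feeds only the new token plus the cached past_key_values, and assert that a random slice of the two outputs agrees. Below is a minimal, self-contained sketch of that pattern. It uses a small randomly initialized GPT-2 purely for illustration — the model choice and all sizes are assumptions, not taken from the tests above.

import torch
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(vocab_size=99, n_positions=64, n_embd=32, n_layer=2, n_head=4)
model = GPT2LMHeadModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    # full pass over the prefix, caching key/value states
    past = model(input_ids, use_cache=True).past_key_values
    next_tokens = torch.randint(0, config.vocab_size, (2, 1))
    # incremental pass: only the new token, reusing the cache
    logits_from_past = model(next_tokens, past_key_values=past).logits
    # reference: one full pass over the extended sequence
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    logits_full = model(full_ids).logits[:, -1:, :]

# the cached and uncached paths should agree up to numerical noise
assert torch.allclose(logits_from_past, logits_full, atol=1e-3)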
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class BioGptModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return BioGptConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BioGptModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, 
attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BioGptForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_biogpt_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = BioGptModel(config=config) model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_biogpt_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = BioGptModel(config=config).to(torch_device).eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = BioGptForCausalLM(config) model.to(torch_device) if 
gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_biogpt_weight_initialization(self, config, *args): model = BioGptModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def create_and_check_biogpt_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels model = BioGptForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False def setUp(self): self.model_tester = BioGptModelTester(self) self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_biogpt_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs) def test_biogpt_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_biogpt_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs) def test_biogpt_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs) def test_biogpt_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs) @slow def test_batch_generation(self): model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(torch_device) tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BioGptModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_biogpt_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = BioGptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_biogpt_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = BioGptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @require_torch class BioGptModelIntegrationTest(unittest.TestCase): @slow def test_inference_lm_head_model(self): model = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt") input_ids = torch.tensor([[2, 4805, 9, 656, 21]]) output = model(input_ids)[0] vocab_size = 42384 expected_shape = torch.Size((1, 5, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_biogpt_generation(self): tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device) output_ids = model.generate( **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, ) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
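The test_batch_generation test above depends on left padding: with padding on the left, the final token of every prompt sits at the end of the batch, so generation continues from the right place for all rows. A minimal usage sketch of that setup follows; it mirrors the test directly, assumes downloading the "microsoft/biogpt" checkpoint is acceptable, and its decoded text may vary across transformers versions.

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
tokenizer.padding_side = "left"           # pad on the left so prompt ends line up
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the pad token, as in the test

model = BioGptForCausalLM.from_pretrained("microsoft/biogpt").eval()
model.config.pad_token_id = model.config.eos_token_id

inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))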
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BioGptTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertTrue(encoded_sentence == [2] + text) self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
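The fixture above (adapted, per the original file's comment, from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt) encodes a tiny BPE model: the vocab lists the subword units and the merges file ranks which adjacent pairs fuse first. The sketch below is a simplified toy re-implementation of that merge loop — not the BioGptTokenizer code itself — showing how the ranked merges turn "lower" into the ["low", "er</w>"] tokens the test asserts.

# Toy illustration of how ranked merges drive BPE segmentation.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # rank = position in the list
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(word):
    # start from characters, marking the end of the word with </w>
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        # pick the adjacent pair with the best (lowest) merge rank
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no known merge applies; stop
        i = pairs.index(best)
        symbols = symbols[:i] + [best[0] + best[1]] + symbols[i + 2:]
    return symbols

print(bpe("lower"))  # ['low', 'er</w>']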
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Bit model. """
import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class BitModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) self.out_features = out_features self.out_indices = out_indices self.num_groups = num_groups def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return BitConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, ) def create_and_check_model(self, config, pixel_values, labels): model = BitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = BitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = BitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) config.out_features = None model = BitBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, 
self.hidden_sizes[-1], 1, 1]) self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = BitModelTester(self) self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="Bit does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Bit does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Bit does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, module in model.named_modules(): if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertTrue( torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, 
model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="Bit does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BitModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class BitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @require_torch class BitBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (BitBackbone,) if is_torch_available() else () config_class = BitConfig has_attentions = False def setUp(self): self.model_tester = BitModelTester(self)
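# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the stage/stride
# bookkeeping that create_and_check_backbone() relies on, spelled out with
# the tester's default sizes. Assumes torch and transformers are installed;
# `demo_bit_backbone` is our own name, everything else is the same BitConfig /
# BitBackbone usage exercised by the tests.
import torch

from transformers import BitBackbone, BitConfig


def demo_bit_backbone():
    config = BitConfig(
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        hidden_act="relu",
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    )
    model = BitBackbone(config=config).eval()
    pixel_values = torch.rand(3, 3, 32, 32)  # (batch, channels, height, width)
    with torch.no_grad():
        outputs = model(pixel_values)
    # One feature map per requested stage. "stage2" downsamples by a factor
    # of 8, so a 32x32 input yields a 4x4 map with hidden_sizes[1] channels,
    # matching the shape assertion in create_and_check_backbone().
    assert len(outputs.feature_maps) == len(config.out_features)
    assert list(outputs.feature_maps[0].shape) == [3, 16, 4, 4]
    assert model.channels == [16, 32, 64]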
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Blenderbot model. """
import tempfile import unittest from transformers import BlenderbotConfig, is_torch_available from transformers.testing_utils import ( backend_empty_cache, require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotTokenizer from transformers.models.blenderbot.modeling_blenderbot import ( BlenderbotDecoder, BlenderbotEncoder, BlenderbotForCausalLM, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class BlenderbotModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, 
decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BlenderbotModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BlenderbotModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BlenderbotEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BlenderbotDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BlenderbotModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotModel, BlenderbotForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (BlenderbotForConditionalGeneration,) if 
is_torch_available() else () pipeline_model_mapping = ( { "conversational": BlenderbotForConditionalGeneration, "feature-extraction": BlenderbotModel, "summarization": BlenderbotForConditionalGeneration, "text-generation": BlenderbotForCausalLM, "text2text-generation": BlenderbotForConditionalGeneration, "translation": BlenderbotForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = BlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) @unittest.skipUnless(torch_device != "cpu", "3B test too slow on CPU.") @require_torch @require_sentencepiece @require_tokenizers class Blenderbot3BIntegrationTests(unittest.TestCase): ckpt = "facebook/blenderbot-3B" @cached_property def tokenizer(self): return BlenderbotTokenizer.from_pretrained(self.ckpt) @slow def test_generation_from_short_input_same_as_parlai_3B(self): FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} backend_empty_cache(torch_device) model = BlenderbotForConditionalGeneration.from_pretrained(self.ckpt).half().to(torch_device) src_text = ["Sam"] model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device) generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS) tgt_text = 'Sam is a great name. It means "sun" in Gaelic.' generated_txt = self.tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW) assert generated_txt[0].strip() == tgt_text src_text = ( "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel" " like i'm going to throw up.\nand why is that?" 
) model_inputs = self.tokenizer([src_text], return_tensors="pt").to(torch_device) generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)[0] reply = self.tokenizer.decode(generated_ids, **TOK_DECODE_KW) assert "I think it's because we are so worried about what people think of us." == reply.strip() del model class BlenderbotStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, encoder_no_repeat_ngram_size=0, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.encoder_no_repeat_ngram_size = encoder_no_repeat_ngram_size self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, encoder_no_repeat_ngram_size=self.encoder_no_repeat_ngram_size, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BlenderbotDecoder(config=config).to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) 
output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BlenderbotDecoder(config=config).to(torch_device).eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class BlenderbotStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotDecoder, BlenderbotForCausalLM) if is_torch_available() else () all_generative_model_classes = (BlenderbotForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BlenderbotStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass
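# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the caching
# invariant that the create_and_check_decoder_model_past* tests verify.
# Feeding the whole sequence at once and feeding only the new tokens together
# with past_key_values must produce matching hidden states for the new
# positions. Assumes torch and transformers are installed; the config values
# mirror BlenderbotModelTester's defaults and `demo_decoder_cache` is ours.
import torch

from transformers import BlenderbotConfig, BlenderbotModel


def demo_decoder_cache():
    config = BlenderbotConfig(
        vocab_size=99,
        d_model=16,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        max_position_embeddings=50,
    )
    decoder = BlenderbotModel(config).get_decoder().eval()
    input_ids = torch.randint(3, config.vocab_size, (2, 7))
    next_tokens = torch.randint(3, config.vocab_size, (2, 3))
    with torch.no_grad():
        # First forward pass: cache the key/value states of the prefix.
        past_key_values = decoder(input_ids, use_cache=True).past_key_values
        # Full pass over prefix + new tokens, versus cached incremental pass.
        full = decoder(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
        cached = decoder(next_tokens, past_key_values=past_key_values).last_hidden_state
    # The cached pass only returns states for the three new tokens; they must
    # match the tail of the full pass up to numerical tolerance.
    assert torch.allclose(full[:, -3:], cached, atol=1e-3)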
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note: the slow tests in this file often fail with OOM errors on GPU.
# Setting XLA_PYTHON_CLIENT_ALLOCATOR=platform (done right after the
# is_flax_available() check below) makes JAX allocate exactly what is needed
# on demand and deallocate memory that is no longer needed, but will be
# slower, as stated here:
# https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
import unittest import numpy as np import timeout_decorator from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, 
) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 
39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBlenderbotModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for 
jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill") input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.") @slow def test_generation_from_short_input_same_as_parlai_3B(self): FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25} TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True) tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") src_text = ["Sam"] model_inputs = tokenizer(src_text, return_tensors="jax") generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS) tgt_text = 'Sam is a great name. It means "sun" in Gaelic.' generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW) assert generated_txt[0].strip() == tgt_text
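# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a standalone NumPy
# re-implementation of the behaviour test_shift_tokens_right checks. It
# mirrors what the test asserts but is written for exposition and is not
# guaranteed to be line-for-line identical to the library's
# shift_tokens_right.
import numpy as np


def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    # Rotate every row one position to the right and prepend the decoder
    # start token; the last token of each row falls off the end.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Masked label positions (-100, used by some training pipelines) would be
    # invalid ids after the rotation, so map them to the pad token.
    return np.where(shifted == -100, pad_token_id, shifted)


demo_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
demo_shifted = shift_right(demo_ids, pad_token_id=1, decoder_start_token_id=2)
assert demo_shifted.shape == demo_ids.shape
assert (demo_shifted[:, 0] == 2).all()
# Exactly one trailing pad token is rotated off the end of the batch.
assert (demo_shifted == 1).sum() == (demo_ids == 1).sum() - 1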
Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Testing suite for the TensorFlow Blenderbot model. The decoder-cache check below proceeds in four steps: a first forward pass with use_cache=True; creating hypothetical next tokens and extending them into next_input_ids; appending them to the existing input_ids and attention mask; then selecting a random slice and testing that the cached and uncached outputs are equal for that slice.
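A minimal sketch of that cache-consistency pattern, assuming a tiny randomly initialised config (all sizes here are illustrative; the tester class below runs the full version with head masks):

import tensorflow as tf
from transformers import BlenderbotConfig, TFBlenderbotModel

# Tiny random model; nothing is downloaded.
config = BlenderbotConfig(
    vocab_size=99, d_model=16, encoder_layers=1, decoder_layers=1,
    encoder_attention_heads=2, decoder_attention_heads=2,
    encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=32,
)
decoder = TFBlenderbotModel(config).get_decoder()

input_ids = tf.constant([[5, 6, 7]])
attention_mask = tf.ones_like(input_ids)

# First forward pass, keeping the key/value cache.
output, past_key_values = decoder(input_ids, attention_mask=attention_mask, use_cache=True).to_tuple()

# Second pass: feed only the hypothetical next token plus the cache;
# the attention mask must cover the full (cached + new) length.
next_tokens = tf.constant([[8]])
full_mask = tf.ones((1, 4), dtype=tf.int32)
output_from_no_past = decoder(tf.concat([input_ids, next_tokens], axis=-1), attention_mask=full_mask)[0]
output_from_past = decoder(next_tokens, attention_mask=full_mask, past_key_values=past_key_values)[0]

# The cached pass must reproduce the last position of the uncached pass.
tf.debugging.assert_near(output_from_past, output_from_no_past[:, -1:], rtol=1e-3)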
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class TFBlenderbotModelTester: config_cls = BlenderbotConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, 
attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def setUp(self): self.model_tester = TFBlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class TFBlenderbot400MIntegrationTests(unittest.TestCase): src_text = ["My friends are cool but they eat too many carbs."] model_name = "facebook/blenderbot-400M-distill" @cached_property def tokenizer(self): return BlenderbotTokenizer.from_pretrained(self.model_name) @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Tests for Blenderbot tokenizers, including common tests for BlenderbotSmallTokenizer.
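A quick illustration of the ParlAI-parity property these tests pin down (this sketch downloads the 3B tokenizer, just as the tests themselves do): with add_prefix_space enabled, a leading space does not change the encoding, and decoding round-trips the input.

from transformers import BlenderbotTokenizer

tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
assert tok.add_prefix_space

# "Sam" with and without a leading space encodes identically (ParlAI behaviour).
assert tok(" Sam").input_ids == tok("Sam").input_ids == [5502, 2]

# Encode/decode round trip: skipping special tokens recovers the source text.
text = " I am a small frog."
ids = tok([text], padding=False, truncation=False)["input_ids"]
assert tok.batch_decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] == text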
import unittest from transformers import BlenderbotTokenizer, BlenderbotTokenizerFast from transformers.testing_utils import require_jinja from transformers.utils import cached_property class Blenderbot3BTokenizerTests(unittest.TestCase): @cached_property def tokenizer_3b(self): return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") @cached_property def rust_tokenizer_3b(self): return BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") def test_encode_decode_cycle(self): tok = self.tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_encode_decode_cycle_rust_tokenizer(self): tok = self.rust_tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_3B_tokenization_same_as_parlai(self): assert self.tokenizer_3b.add_prefix_space assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]] def test_3B_tokenization_same_as_parlai_rust_tokenizer(self): assert self.rust_tokenizer_3b.add_prefix_space assert self.rust_tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]] @require_jinja def test_tokenization_for_chat(self): tok = self.tokenizer_3b test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tok.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [553, 366, 265, 4792, 3879, 73, 311, 21, 228, 228, 6950, 8, 2], [553, 366, 265, 4792, 3879, 73, 311, 21, 228, 228, 6950, 8, 228, 3490, 287, 2273, 304, 21, 2], [3490, 287, 2273, 304, 21, 228, 228, 6950, 8, 2], ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens)
Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Testing suite for the PyTorch BlenderbotSmall model. Points the file's comments call out: forced_bos_token_id and forced_eos_token_id stay None because forcing a token to be generated sets all other tokens to -inf, and if the token to be forced is itself already at -inf this can produce NaN values and break generation; the text-generation and conversational pipeline tests are skipped, with a TODO to fix them when this model gets more usage; assert_tensors_close raises a readable AssertionError when the tensors have different shapes or values, or when a and b are not both tensors; the decoder-past checks run a first forward pass, create hypothetical next tokens, append them to next_input_ids, then select a random slice and test that cached and uncached outputs are equal for that slice; the attention-mask variant additionally masks out half the input and changes a random masked slice of input_ids before appending to the ids and attn_mask; and test_retain_grad_hidden_states_attentions is a no-op because the decoder cannot keep gradients and is not used enough to be worth fixing.
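A condensed sketch of that attention-mask-past check, assuming a tiny randomly initialised config (illustrative sizes; the tester below runs the full random-slice version). The point: positions hidden by the mask may be overwritten after the cache is built, and the cached and uncached passes must still agree, which shows the cache honours the mask.

import torch
from transformers import BlenderbotSmallConfig
from transformers.models.blenderbot_small.modeling_blenderbot_small import BlenderbotSmallDecoder

config = BlenderbotSmallConfig(
    vocab_size=99, d_model=16, decoder_layers=1, decoder_ffn_dim=8,
    encoder_attention_heads=2, decoder_attention_heads=2, max_position_embeddings=32,
)
decoder = BlenderbotSmallDecoder(config).eval()

input_ids = torch.tensor([[5, 6, 7, 8]])
attn_mask = torch.tensor([[1, 1, 0, 0]])  # second half masked out

# Build the cache, then overwrite a masked position.
past = decoder(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
input_ids[:, -1] = 42

# Append one real token to the ids and the mask.
next_tokens = torch.tensor([[9]])
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat([attn_mask, torch.ones((1, 1), dtype=torch.long)], dim=1)

no_past = decoder(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
with_past = decoder(next_tokens, attention_mask=attn_mask, past_key_values=past)["last_hidden_state"]

# Outputs for the new position agree even though a masked token changed.
assert torch.allclose(with_past[:, 0], no_past[:, -1], atol=1e-3)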
import tempfile import unittest from transformers import BlenderbotSmallConfig, is_torch_available from transformers.testing_utils import ( require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallTokenizer from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallDecoder, BlenderbotSmallEncoder, BlenderbotSmallForCausalLM, ) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class BlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, 
decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BlenderbotSmallModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BlenderbotSmallModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BlenderbotSmallEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BlenderbotSmallDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallModel, BlenderbotSmallForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": 
BlenderbotSmallForConditionalGeneration, "feature-extraction": BlenderbotSmallModel, "summarization": BlenderbotSmallForConditionalGeneration, "text-generation": BlenderbotSmallForCausalLM, "text2text-generation": BlenderbotSmallForConditionalGeneration, "translation": BlenderbotSmallForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = BlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) @require_torch class Blenderbot90MIntegrationTests(unittest.TestCase): ckpt = "facebook/blenderbot-90M" @cached_property def model(self): model = BlenderbotSmallForConditionalGeneration.from_pretrained(self.ckpt).to(torch_device) if torch_device == "cuda": model = model.half() return model @cached_property def tokenizer(self): return BlenderbotSmallTokenizer.from_pretrained(self.ckpt) @slow def test_90_generation_from_long_input(self): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel" " like i'm going to throw up.\nand why is that?" ] model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device) assert isinstance(self.tokenizer, BlenderbotSmallTokenizer) generated_ids = self.model.generate(**model_inputs)[0] reply = self.tokenizer.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) assert reply in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. 
i just feel like i've been feeling like i have to be in a certain place", ) @slow def test_90_generation_from_short_input(self): model_inputs = self.tokenizer(["sam"], return_tensors="pt").to(torch_device) generated_utterances = self.model.generate(**model_inputs) clean_txt = self.tokenizer.decode( generated_utterances[0], skip_special_tokens=True, clean_up_tokenization_spaces=True ) assert clean_txt in ( "have you ever been to a sam club? it's a great club in the south.", "have you ever heard of sam harris? he's an american singer, songwriter, and actor.", ) class BlenderbotSmallStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = 
ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class BlenderbotSmallStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallDecoder, BlenderbotSmallForCausalLM) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BlenderbotSmallStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass
Copyright 2021 The HuggingFace team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Testing suite for the Flax BlenderbotSmall model. The slow tests often fail with OOM errors on GPU; setting XLA_PYTHON_CLIENT_ALLOCATOR to "platform" makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed, at the cost of speed, as described at https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html. Two further notes from the file: timeout_decorator.timeout(1) is not working with the decorator so far, and FlaxBlenderbotForSequenceClassification expects an eos token in input_ids.
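The OOM workaround described above comes down to one environment variable that must be set before jax is imported, exactly as the file below does; a minimal sketch:

import os

# "platform" makes XLA allocate GPU memory on demand and free it when unused,
# instead of pre-allocating most of the GPU memory up front. Slower, but it
# avoids spurious OOMs in slow tests (see the JAX GPU memory allocation docs).
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

import jax  # noqa: E402  (must come after the environment variable is set)

print(jax.devices())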
import unittest import numpy as np import timeout_decorator from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = 
prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 
61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = FlaxBlenderbotSmallModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT 
Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot_small-90M") input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs)
Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied.

Testing suite for the TensorFlow BlenderbotSmall model. The decoder-cache check follows the same pattern as above (first forward pass, hypothetical next tokens appended to next_input_ids, random-slice comparison of cached and uncached outputs), and the integration test uses the old tokenizer because of a bug when downloading the new tokenizer.
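One detail worth calling out in prepare_blenderbot_small_inputs_dict below: the decoder attention mask forces position 0 to 1. The tester sets decoder_start_token_id equal to the pad token id, so masking pad tokens naively would also hide the start token. An illustrative sketch with made-up values:

import tensorflow as tf

decoder_input_ids = tf.constant([[1, 45, 12, 1, 1]])  # starts with pad-as-start-token
mask = tf.concat(
    [
        tf.ones_like(decoder_input_ids[:, :1]),  # keep the start position visible
        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], 1), tf.int32),  # mask real pads
    ],
    axis=-1,
)
print(mask.numpy())  # [[1 1 1 0 0]]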
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class TFBlenderbotSmallModelTester: config_cls = BlenderbotSmallConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotSmallModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] 
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name in ("TextGenerationPipelineTests", "ConversationalPipelineTests") def setUp(self): self.model_tester = TFBlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class TFBlenderbot90MIntegrationTests(unittest.TestCase): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" 
] model_name = "facebook/blenderbot_small-90M" @cached_property def tokenizer(self): return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_90_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
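Outside the test harness, the slow integration path above condenses to a short generation script. This is a hedged sketch rather than part of the suite: it needs network access to fetch the checkpoint, and the tokenizer is loaded from the older facebook/blenderbot-90M repo because of a reported download bug with the newer tokenizer repo.

from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")  # old tokenizer repo on purpose
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")

# Any dialogue-style prompt works; this mirrors the integration test's input.
src_text = ["Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing.\nand why is that?"]
model_inputs = tokenizer(src_text, return_tensors="tf")
generated_ids = model.generate(
    model_inputs.input_ids,
    attention_mask=model_inputs.attention_mask,
    num_beams=2,
    use_cache=True,
)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])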
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import BlipImageProcessor class BlipImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, do_pad=False, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): size = size if size is not None else {"height": 20, "width": 20} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "do_pad": self.do_pad, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class BlipImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = BlipImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_convert_rgb")) @require_torch @require_vision class BlipImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = BlipImageProcessingTester(self, num_channels=4) self.expected_encoded_image_num_channels = 3 @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_convert_rgb")) @unittest.skip("BlipImageProcessor does not support 4 
channels yet") def test_call_numpy(self): return super().test_call_numpy() @unittest.skip("BlipImageProcessor does not support 4 channels yet") def test_call_pytorch(self): return super().test_call_torch() @unittest.skip("BLIP doesn't treat 4 channel PIL and numpy consistently yet") def test_call_pil(self): pass @unittest.skip("BLIP doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass
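The PyTorch BLIP modeling tests follow. As orientation for the slow integration checks they build up to, here is a hedged end-to-end captioning sketch against the public base checkpoint; it needs network access, and the demo image URL mirrors the one the tests fetch.

import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Unconditional captioning; pass text="a picture of" as well to condition the caption.
inputs = processor(images=image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))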
import inspect import os import tempfile import unittest import numpy as np import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipTextModel, BlipVisionModel, ) from transformers.models.blip.modeling_blip import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BlipProcessor class BlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return BlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = BlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BlipVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def 
setUp(self): self.model_tester = BlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class BlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = BlipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) class BlipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def
prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlipModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": BlipModel, "image-to-text": BlipForConditionalGeneration} if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = BlipModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace 
module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) class BlipTextRetrievalModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) 
def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipTextImageModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipVQAModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "decoder_input_ids": input_ids, "attention_mask": 
attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch @require_vision class BlipVQAModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForQuestionAnswering,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipVQAModelTester(self) def _prepare_inputs_for_vqa(self): _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["labels"] = inputs_dict["input_ids"] inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict.pop("return_loss") return inputs_dict def test_class_name_consistency(self): for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) self.assertTrue( model.__class__.__name__.endswith("ForQuestionAnswering"), f"Class name should end with 'ForQuestionAnswering', got {model.__class__.__name__}", ) def test_training(self): for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()).to(torch_device) model.train() loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1]).loss loss.backward() for name, param in model.named_parameters(): self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}") def test_forward_signature(self): for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) signature = inspect.signature(model.forward) args = list(signature.parameters.keys()) expected_args = [ "input_ids", "attention_mask", "labels", "decoder_input_ids", "decoder_attention_mask", ] for arg in expected_args: self.assertTrue( arg in args, f"Argument '{arg}' should be part of the forward signature.
Found {args}.", ) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass @require_torch class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForImageTextRetrieval,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if all(mask in arg_names for mask in ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]) else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check:
https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = 
(BlipForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if all(mask in arg_names for mask in ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]) else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init =
_config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class BlipModelIntegrationTest(unittest.TestCase): def test_inference_image_captioning(self): model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) 
@require_torch_accelerator @require_torch_fp16 def test_inference_image_captioning_fp16(self): model = BlipForConditionalGeneration.from_pretrained( "Salesforce/blip-image-captioning-base", torch_dtype=torch.float16 ).to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) def test_inference_vqa(self): model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") image = prepare_img() text = "how many dogs are in the picture?" inputs = processor(image, text=text, return_tensors="pt").to(torch_device) out = model.generate(**inputs) self.assertEqual(out[0].tolist(), [30522, 1015, 102]) def test_inference_itm(self): model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco") image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(image, text, return_tensors="pt").to(torch_device) out_itm = model(**inputs) out = model(**inputs, use_itm_head=False) expected_scores = torch.Tensor([[0.0029, 0.9971]]) self.assertTrue(torch.allclose(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)) self.assertTrue(torch.allclose(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3))
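# For reference, the captioning path asserted by BlipModelIntegrationTest above can be reproduced as a short
# standalone snippet. A minimal sketch, assuming network access to the same public checkpoint and demo image the
# tests use; the decoded captions in the comments are indicative, not asserted:
import requests
import torch
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device)

# Same demo image that prepare_img() downloads in the integration tests
url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Unconditional captioning (first assertion above)
inputs = processor(images=image, return_tensors="pt").to(device)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))  # e.g. "a woman sitting on the beach with her dog"

# Conditional captioning with a text prefix (second assertion above)
inputs = processor(images=image, text="a picture of", return_tensors="pt").to(device)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))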
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """Testing suite for the PyTorch Blip model."""
import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import BlipTextModel from transformers.models.blip.modeling_blip import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class BlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = BlipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, 
config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True)
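# The testers above build "left-contiguous" attention masks: each row attends to a random-length prefix and masks
# out the rest, so every example has at least one real token and at least one padded position. A minimal numpy
# sketch of just that construction, in isolation (the batch/seq sizes here are illustrative, not from the tests):
import numpy as np

batch_size, seq_length = 4, 7
input_mask = np.ones((batch_size, seq_length), dtype=np.int64)
# Pick, per row, where the attended prefix ends (randint(1, seq_length - 1) guarantees both regions are non-empty)
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
    input_mask[batch_idx, :start_index] = 1  # attended prefix
    input_mask[batch_idx, start_index:] = 0  # masked-out tail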
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """Testing suite for the TensorFlow Blip model.""" from __future__ import annotations import inspect import tempfile import unittest import numpy as np import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipTextModel, TFBlipVisionModel, ) from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BlipProcessor
class TFBlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return BlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = TFBlipVisionModel(config=config) result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict
@require_tf class TFBlipVisionModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipVisionModel,) if is_tf_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFBlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model)
class TFBlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: input_mask = input_mask.numpy() batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 input_mask = tf.convert_to_tensor(input_mask) config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFBlipTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict
@require_tf class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipTextModel,) if is_tf_available() else () fx_compatible = False test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFBlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
class TFBlipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict
@require_tf class TFBlipModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFBlipModel,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFBlipModel, "image-to-text": TFBlipForConditionalGeneration} if is_tf_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = TFBlipModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @unittest.skip("Matt: Re-enable this test when we have a proper export function for TF models.") def test_saved_model_creation(self): # This fails because the if return_loss: conditional can return None or a Tensor and TF hates that. # We could fix that by setting the bool to a constant when exporting, but that requires a dedicated export # function that we don't have yet. pass
class BlipTextRetrievalModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict
class BlipTextImageModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict
class BlipVQAModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFBlipModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "labels": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict
@require_tf @require_vision class TFBlipVQAModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipForQuestionAnswering,) if is_tf_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = BlipVQAModelsModelTester(self) def _prepare_inputs_for_vqa(self): _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["labels"] = inputs_dict["input_ids"] inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict.pop("return_loss") return inputs_dict def test_class_name_consistency(self): for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) self.assertTrue( model.__class__.__name__.endswith("ForQuestionAnswering"), f"Class name should end with 'ForQuestionAnswering', got {model.__class__.__name__}", ) def test_training(self): for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1], training=True).loss self.assertIsNotNone(loss, "Loss should not be None") @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="Tested in individual model tests") def test_compile_tf_model(self): pass @unittest.skip("Model doesn't have a clean loss output.") def test_keras_fit(self): pass
@require_tf class TFBlipTextRetrievalModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipForImageTextRetrieval,) if is_tf_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs, training=True).loss self.assertTrue(loss is not None) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFBlipModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Tested in individual model tests") def test_compile_tf_model(self): pass @unittest.skip("Model doesn't have a clean loss output.") def test_keras_fit(self): pass
@require_tf class TFBlipTextImageModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlipForConditionalGeneration,) if is_tf_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters
keys if model config isencoderdecoder expectedargnames inputids attentionmask decoderinputids decoderattentionmask expectedargnames extend headmask decoderheadmask crossattnheadmask encoderoutputs if headmask and decoderheadmask and crossattnheadmask in argnames else encoderoutputs self assertlistequalargnames lenexpectedargnames expectedargnames else expectedargnames inputids if modelclass tfblipforconditionalgeneration else pixelvalues self assertlistequalargnames 1 expectedargnames unittest skipreasontested in individual model tests def testcompiletfmodelself pass unittest skiphas some odd input names def testkerasfitself pass unittest skipreasonretaingrad is tested in individual model tests def testretaingradhiddenstatesattentionsself pass unittest skipreasonblipmodel does not have inputoutput embeddings def testmodelcommonattributesself pass def testtrainingself if not self modeltester istraining return for modelclass in self allmodelclasses 1 config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true model modelclassconfig inputs self prepareforclassinputsdict modelclass returnlabelstrue hardcode labels to be the same as inputids inputslabels inputsinputids loss modelinputs trainingtrue loss self assertisnotnoneloss def testloadvisiontextconfigself config inputsdict self modeltester prepareconfigandinputsforcommon save blipconfig and check if we can load blipvisionconfig from it with tempfile temporarydirectory as tmpdirname config savepretrainedtmpdirname visionconfig blipvisionconfig frompretrainedtmpdirname self assertdictequalconfig visionconfig todict visionconfig todict save blipconfig and check if we can load bliptextconfig from it with tempfile temporarydirectory as tmpdirname config savepretrainedtmpdirname textconfig bliptextconfig frompretrainedtmpdirname self assertdictequalconfig textconfig todict textconfig todict slow def testmodelfrompretrainedself for modelname in tfblippretrainedmodelarchivelist 1 model tfblipmodel frompretrainedmodelname self assertisnotnonemodel we will verify our results on an image of cute cats def prepareimg url https huggingface cohfinternaltestingbliptestimageresolvemaindemo jpg im image openrequests geturl streamtrue raw return im requirevision requiretf slow class tfblipmodelintegrationtestunittest testcase def testinferenceimagecaptioningself model tfblipforconditionalgeneration frompretrainedsalesforceblipimagecaptioningbase processor blipprocessor frompretrainedsalesforceblipimagecaptioningbase image prepareimg image only inputs processorimagesimage returntensorstf predictions model generateinputs test output self assertequal predictions0 numpy tolist 30522 1037 2450 3564 2006 1996 3509 2007 2014 3899 102 image and context context a picture of inputs processorimagesimage textcontext returntensorstf predictions model generateinputs test output self assertequal predictions0 numpy tolist 30522 1037 3861 1997 1037 2450 1998 2014 3899 2006 1996 3509 102 def testinferencevqaself model tfblipforquestionanswering frompretrainedsalesforceblipvqabase processor blipprocessor frompretrainedsalesforceblipvqabase image prepareimg text how many dogs are in the picture inputs processorimage texttext returntensorstf out model generateinputs test output self assertequalout0 numpy tolist 30522 1015 102 def testinferenceitmself model tfblipforimagetextretrieval frompretrainedsalesforceblipitmbasecoco processor blipprocessor frompretrainedsalesforceblipitmbasecoco image prepareimg text a woman and her dog sitting in a beach 
inputs processorimage text returntensorstf outitm modelinputs out modelinputs useitmheadfalse trainingfalse expectedscores tf converttotensor0 0029 0 9971 self asserttruenp allclosetf nn softmaxoutitm0 numpy expectedscores rtol1e3 atol1e3 self asserttruenp allcloseout0 tf converttotensor0 5162 rtol1e3 atol1e3 coding utf 8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the tensorflow blip model in vit the seq length equals the number of patches 1 we add 1 for the cls token expected sequence length num_patches 1 we add 1 for the cls token here we also overwrite some of the tests of test_modeling_common py as blip does not use input_ids inputs_embeds attention_mask and seq_length signature parameters is an ordereddict so arg_names order is deterministic save blipconfig and check if we can load blipvisionconfig from it save blipconfig and check if we can load bliptextconfig from it this fails because the if return_loss conditional can return none or a tensor and tf hates that we could fix that by setting the bool to a constant when exporting but that requires a dedicated export function that we don t have yet tests that all vqa models have a class name that ends with forquestionanswering tests that all vqa models can be trained on a single batch hardcode labels to be the same as input_ids save blipconfig and check if we can load blipvisionconfig from it save blipconfig and check if we can load bliptextconfig from it signature parameters is an ordereddict so arg_names order is deterministic hardcode labels to be the same as input_ids save blipconfig and check if we can load blipvisionconfig from it save blipconfig and check if we can load bliptextconfig from it we will verify our results on an image of cute cats image only test output image and context test output test output
"""Testing suite for the TensorFlow Blip model."""

from __future__ import annotations

import inspect
import tempfile
import unittest

import numpy as np
import requests

from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFBlipForConditionalGeneration,
        TFBlipForImageTextRetrieval,
        TFBlipForQuestionAnswering,
        TFBlipModel,
        TFBlipTextModel,
        TFBlipVisionModel,
    )
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import BlipProcessor


class TFBlipVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=1e-10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return BlipVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TFBlipVisionModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFBlipVisionModelTest(TFModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Blip does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFBlipVisionModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlipVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class TFBlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
            input_mask = tf.convert_to_tensor(input_mask)

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)


class TFBlipModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return BlipConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFBlipModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_tf
class TFBlipModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipModel,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFBlipModel, "image-to-text": TFBlipForConditionalGeneration}
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlipModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="BlipModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save BlipConfig and check if we can load BlipVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save BlipConfig and check if we can load BlipTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = BlipTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)

    @unittest.skip("Matt: Re-enable this test when we have a proper export function for TF models.")
    def test_saved_model_creation(self):
        # This fails because the `if return_loss:` conditional can return None or a Tensor and TF hates that.
        # We could fix that by setting the bool to a constant when exporting, but that requires a dedicated export
        # function that we don't have yet.
        pass


class BlipTextRetrievalModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return BlipConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFBlipModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


class BlipTextImageModelsModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return BlipConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFBlipModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "labels": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


class BlipVQAModelsModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = TFBlipTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = TFBlipVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return BlipConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFBlipModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "labels": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


@require_tf
@require_vision
class TFBlipVQAModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipForQuestionAnswering,) if is_tf_available() else ()
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipVQAModelsModelTester(self)

    def _prepare_inputs_for_vqa(self):
        _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["labels"] = inputs_dict["input_ids"]
        inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"]
        # the tester's inputs_dict does not include "return_loss", so pop with a default to avoid a KeyError
        inputs_dict.pop("return_loss", None)
        return inputs_dict

    def test_class_name_consistency(self):
        """
        Tests that all VQA models have a class name that ends with "ForQuestionAnswering"
        """
        for model_class in self.all_model_classes:
            model = model_class(self.model_tester.get_config())
            self.assertTrue(
                model.__class__.__name__.endswith("ForQuestionAnswering"),
                f"Class name should end with 'ForQuestionAnswering' got {model.__class__.__name__}",
            )

    def test_training(self):
        """
        Tests that all VQA models can be trained on a single batch
        """
        for model_class in self.all_model_classes:
            model = model_class(self.model_tester.get_config())
            loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1], training=True).loss
            self.assertIsNotNone(loss, "Loss should not be None")

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="BlipModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Tested in individual model tests")
    def test_compile_tf_model(self):
        pass

    @unittest.skip("Model doesn't have a clean loss output.")
    def test_keras_fit(self):
        pass


@require_tf
class TFBlipTextRetrievalModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipForImageTextRetrieval,) if is_tf_available() else ()
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextRetrievalModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="BlipModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes[:-1]:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            # hardcode labels to be the same as input_ids
            inputs["labels"] = inputs["input_ids"]

            loss = model(**inputs, training=True).loss
            self.assertTrue(loss is not None)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save BlipConfig and check if we can load BlipVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save BlipConfig and check if we can load BlipTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = BlipTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Tested in individual model tests")
    def test_compile_tf_model(self):
        pass

    @unittest.skip("Model doesn't have a clean loss output.")
    def test_keras_fit(self):
        pass


@require_tf
class TFBlipTextImageModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipForConditionalGeneration,) if is_tf_available() else ()
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextImageModelsModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"]
                    # check that all three mask arguments are actually in the signature, not just the last one
                    if all(mask in arg_names for mask in ("head_mask", "decoder_head_mask", "cross_attn_head_mask"))
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = (
                    ["input_ids"] if model_class != TFBlipForConditionalGeneration else ["pixel_values"]
                )
                self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="Tested in individual model tests")
    def test_compile_tf_model(self):
        pass

    @unittest.skip("Has some odd input names!")
    def test_keras_fit(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="BlipModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes[:-1]:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            # hardcode labels to be the same as input_ids
            inputs["labels"] = inputs["input_ids"]

            loss = model(**inputs, training=True).loss
            self.assertIsNotNone(loss)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save BlipConfig and check if we can load BlipVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save BlipConfig and check if we can load BlipTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = BlipTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_tf
@slow
class TFBlipModelIntegrationTest(unittest.TestCase):
    def test_inference_image_captioning(self):
        model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
        processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        image = prepare_img()

        # image only
        inputs = processor(images=image, return_tensors="tf")

        predictions = model.generate(**inputs)

        # Test output
        self.assertEqual(
            predictions[0].numpy().tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
        )

        # image and context
        context = ["a picture of"]
        inputs = processor(images=image, text=context, return_tensors="tf")

        predictions = model.generate(**inputs)

        # Test output
        self.assertEqual(
            predictions[0].numpy().tolist(),
            [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102],
        )

    def test_inference_vqa(self):
        model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")

        image = prepare_img()
        text = "how many dogs are in the picture?"

        inputs = processor(image, text=text, return_tensors="tf")
        out = model.generate(**inputs)

        # Test output
        self.assertEqual(out[0].numpy().tolist(), [30522, 1015, 102])

    def test_inference_itm(self):
        model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
        processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")

        image = prepare_img()
        text = "A woman and her dog sitting in a beach"

        inputs = processor(image, text, return_tensors="tf")

        out_itm = model(**inputs)
        out = model(**inputs, use_itm_head=False, training=False)

        expected_scores = tf.convert_to_tensor([[0.0029, 0.9971]])
        self.assertTrue(np.allclose(tf.nn.softmax(out_itm[0]).numpy(), expected_scores, rtol=1e-3, atol=1e-3))
        self.assertTrue(np.allclose(out[0], tf.convert_to_tensor([[0.5162]]), rtol=1e-3, atol=1e-3))
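# Illustrative usage sketch, not part of the test suite above. The integration tests
# assert raw token IDs; a minimal way to decode them back to text with the same
# processor (assumes the Salesforce checkpoint used above is downloadable):
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
caption_ids = [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
# skip_special_tokens drops the special markers (30522 and 102) from the output
print(processor.decode(caption_ids, skip_special_tokens=True))
# -> something like: "a woman sitting on the beach with her dog"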
"""Testing suite for the TensorFlow Blip model."""

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
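# Illustrative usage sketch, not part of the test suite above: the round-trip these
# processor tests exercise, run against a real checkpoint. The checkpoint choice is
# an assumption here; any BLIP checkpoint with a processor config would do.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']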
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BLIP-2 model. """
import inspect import tempfile import unittest import numpy as np import requests from transformers import CONFIG_MAPPING, Blip2Config, Blip2QFormerConfig, Blip2VisionConfig from transformers.testing_utils import ( require_torch, require_torch_multi_accelerator, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Blip2ForConditionalGeneration, Blip2Model, Blip2VisionModel from transformers.models.blip_2.modeling_blip_2 import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import Blip2Processor class Blip2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Blip2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = Blip2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Blip2VisionModelTester(self) 
self.config_tester = ConfigTester( self, config_class=Blip2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="BLIP-2's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Blip2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Blip2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class Blip2QFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in
enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Blip2QFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) class Blip2TextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = self.eos_token_id attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) class Blip2ForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelDecoderOnlyTester(parent, **text_kwargs) self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = 
self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation(self, config, input_ids, attention_mask, pixel_values): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids, } return config, inputs_dict @require_torch class Blip2ForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_to_base(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in 
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST: model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) class Blip2TextModelTester: def __init__( self, parent, vocab_size=99, batch_size=12, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return CONFIG_MAPPING["t5"]( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) class Blip2ModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelTester(parent, **text_kwargs) self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() ( _, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, 
pixel_values, decoder_input_ids, decoder_attention_mask, lm_labels def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels ): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": labels, } return config, inputs_dict @require_torch class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration, Blip2Model) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Blip2Model, "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": Blip2ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2ModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base Blip2Model") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = 
Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST: model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) def test_get_text_features(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = { "input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), "attention_mask": torch.LongTensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).to(torch_device), "decoder_input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), } model = Blip2Model(config).to(torch_device) model.eval() text_features = model.get_text_features(**inputs_dict) self.assertEqual(text_features[0].shape, (1, 10, config.text_config.vocab_size)) def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] for key in keys_to_pop: inputs_dict.pop(key) model = Blip2Model(config).to(torch_device) model.eval() image_features = model.get_image_features(**inputs_dict) self.assertEqual( image_features[0].shape, ( self.model_tester.vision_model_tester.batch_size, self.model_tester.vision_model_tester.seq_length, config.vision_config.hidden_size, ), ) def test_get_qformer_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] for key in keys_to_pop: inputs_dict.pop(key) model = Blip2Model(config).to(torch_device) model.eval() qformer_features = model.get_qformer_features(**inputs_dict) self.assertEqual( qformer_features[0].shape, (self.model_tester.vision_model_tester.batch_size, 10, config.vision_config.hidden_size), ) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for key in ["vision_config", "qformer_config", "text_config"]: setattr(configs_no_init, key, _config_zero_init(getattr(configs_no_init, key))) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch @slow class Blip2ModelIntegrationTest(unittest.TestCase): def test_inference_opt(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16 ).to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, 
skip_special_tokens=True)[0].strip() self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118]) self.assertEqual("a woman sitting on the beach with a dog", generated_text) prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual( predictions[0].tolist(), [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118], ) self.assertEqual(generated_text, "it's not a city, it's a beach") def test_inference_opt_batched_beam_search(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16 ).to(torch_device) image = prepare_img() inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs, num_beams=2) self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) self.assertEqual(predictions[1].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) def test_inference_t5(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16 ).to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) self.assertEqual("woman playing with dog on the beach", generated_text) prompt = "Question: which city is this? 
Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual( predictions[0].tolist(), [0, 3, 7, 152, 67, 839, 1], ) self.assertEqual(generated_text, "san diego") def test_inference_t5_batched_beam_search(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16 ).to(torch_device) image = prepare_img() inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs, num_beams=2) self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) self.assertEqual(predictions[1].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) @require_torch_multi_accelerator def test_inference_opt_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, device_map="balanced" ) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118]) self.assertEqual("a woman sitting on the beach with a dog", generated_text) prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual( predictions[0].tolist(), [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118], ) self.assertEqual(generated_text, "it's not a city, it's a beach") @require_torch_multi_accelerator def test_inference_t5_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") device_map = { "query_tokens": 0, "vision_model": 0, "language_model": 1, "language_projection": 0, "qformer": 0, } model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-flan-t5-xl", torch_dtype=torch.float16, device_map=device_map ) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1]) self.assertEqual("woman playing with dog on the beach", generated_text) prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() self.assertEqual( predictions[0].tolist(), [0, 3, 7, 152, 67, 839, 1], ) self.assertEqual(generated_text, "san diego")
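Stripped of the test scaffolding, the integration tests above boil down to the following end-to-end usage pattern. This is a minimal sketch mirroring test_inference_opt: the checkpoint, fp16 handling, image URL, and prompt come straight from that test; the only assumption is that a CUDA device is available (the tests use torch_device instead).

import requests
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
).to("cuda")

url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Unconditional captioning: image only.
inputs = processor(images=image, return_tensors="pt").to("cuda", dtype=torch.float16)
caption = processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)[0].strip()
# -> "a woman sitting on the beach with a dog" (the output asserted in test_inference_opt)

# Prompted generation (visual question answering): image plus a text prompt.
inputs = processor(
    images=image, text="Question: which city is this? Answer:", return_tensors="pt"
).to("cuda", dtype=torch.float16)
answer = processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)[0].strip()
# -> "it's not a city, it's a beach"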
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast @require_vision class Blip2ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = BlipImageProcessor() tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") processor = Blip2Processor(image_processor, tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Blip2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, BlipImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"]) with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, 
image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
# coding=utf-8
# Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0.
# Testing suite for the PyTorch Bloom model. The cache tests share one pattern: run a first
# forward pass, create a hypothetical next token and extend next_input_ids (plus token_type_ids
# or the attention mask, changing a random masked slice of input_ids where relevant), get the
# two outputs, select a random slice, and test that the outputs are equal for that slice.
# Torch autograd functions seem to be not supported (relevant to the torchscript tests).
#
# test_simple_generation is a bit flaky for some GPU architectures: PyTorch sets
# allow_fp16_reduced_precision_reduction=True by default, and some operations (especially
# torch.baddbmm and torch.bmm) do not give the same results under this configuration; see
# https://pytorch.org/docs/stable/notes/numerical_accuracy.html#fp16-on-mi200. As we leave the
# default value True, the tests failed when running in half precision with smaller models
# (560m); see https://pytorch.org/docs/stable/notes/cuda.html#reduced-precision-reduction-in-fp16-gemms.
# This discrepancy is observed only when using small models and seems stable for larger models;
# our conclusion is that these operations are flaky for small inputs but stable for larger
# inputs to baddbmm and bmm, and therefore for larger models. Ablation summary:
# expected_output (560m): "I enjoy walking with my cute dog, and I love to watch the kids play.
#   I am a very active person, and I am a very good listener. I am a very good person, ..."
#   560m | allow_fp16_reduced_precision_reduction=False | torch.bmm     | pass
#   560m | allow_fp16_reduced_precision_reduction=False | torch.baddbmm | pass
#   560m | allow_fp16_reduced_precision_reduction=True  | torch.baddbmm | pass
#   560m | allow_fp16_reduced_precision_reduction=True  | torch.bmm     | fail
# expected_output (1b1): "I enjoy walking with my cute dog, but I also enjoy hiking, biking and
#   swimming. I love to cook and bake. ..."
#   1b1  | allow_fp16_reduced_precision_reduction=True  | torch.baddbmm | pass (use_cache True and False)
#   1b1  | allow_fp16_reduced_precision_reduction=True  | torch.bmm     | pass
#   1b1  | allow_fp16_reduced_precision_reduction=False | torch.bmm     | pass
# The reference output was obtained with the fp32 model on the HuggingFace DGX workstation
# (NVIDIA A100 GPU). The token-value and reconstruction tests check that these generations
# match those of the PyTorch model.
#
# BloomEmbeddingTest compares the embeddings generated by a small GPT2-like model trained with
# Megatron-LM against the Transformers library, to ensure that the conversion from Megatron-LM
# to Transformers has been done successfully. It compares the logits of the embedding layer and
# the transformer layers. Warning: it is expected that these logits will not have exactly the
# same statistics when running on CPU vs GPU; for more info see
# https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548 and
# https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9.
# You need to install tokenizers following the README of the tokenizer used during training:
# https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
# (TODO: change the script, or just add a skip, when building the env with tokenizers 0.12.0.)
# The config in this checkpoint has bfloat16 as torch_dtype, so the model loads in bfloat16.
# test_embeddings first checks the embeddings before layernorm; the after-layernorm comparison
# does not pass at places=2, and the logits test uses 1e-06 precision.
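A standalone sketch (not from the test file) of the switch discussed above; it assumes a CUDA device and compares torch.bmm under the two reduction settings:

import torch

a = torch.randn(4, 128, 128, dtype=torch.float16, device="cuda")
b = torch.randn(4, 128, 128, dtype=torch.float16, device="cuda")

# default: fp16 GEMMs may accumulate in reduced precision
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = True
fast = torch.bmm(a, b)

# force full-precision accumulation for the reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
exact = torch.bmm(a, b)

# any nonzero difference here is the source of the small-model flakiness described above
print((fast - exact).abs().max().item())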
import math import unittest from transformers import BloomConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomTokenizerFast, ) @require_torch class BloomModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=False, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return BloomConfig.from_pretrained("bigscience/bloom") def prepare_config_and_inputs(self, gradient_checkpointing=False): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config(gradient_checkpointing=gradient_checkpointing) return (config, input_ids, input_mask, sequence_labels) def get_config(self, gradient_checkpointing=False, slow_but_exact=True): return BloomConfig( vocab_size=self.vocab_size, seq_length=self.seq_length, hidden_size=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, hidden_dropout=self.hidden_dropout_prob, attention_dropout=self.attention_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, num_labels=self.num_labels, gradient_checkpointing=gradient_checkpointing, slow_but_exact=slow_but_exact, dtype="float32", ) def create_and_check_bloom_model(self, config, input_ids, input_mask, *args): model = 
BloomModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args): model = BloomModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True) outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids)) outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids)) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past = outputs["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args): model = BloomModel(config=config) model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args): model = BloomModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, 
past_key_values=past)[ "last_hidden_state" ] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args): model = BloomForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args): config.num_labels = self.num_labels model = BloomForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args): model = BloomForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_question_answering_model(self, config, input_ids, input_mask, *args): model = BloomForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, *args, gradient_checkpointing=False ): model = BloomForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_bloom_weight_initialization(self, config, *args): model = BloomModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, sequence_labels = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @require_torch class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BloomModel, BloomForCausalLM, BloomForSequenceClassification, BloomForTokenClassification, BloomForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BloomModel, "question-answering": BloomForQuestionAnswering, "text-classification": BloomForSequenceClassification, "text-generation": BloomForCausalLM, "token-classification": BloomForTokenClassification, "zero-shot": 
BloomForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False test_torchscript = True def setUp(self): self.model_tester = BloomModelTester(self) self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_bloom_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_model(*config_and_inputs) def test_bloom_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_model_past(*config_and_inputs) def test_bloom_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs) def test_bloom_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs) def test_bloom_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_bloom_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs) def test_bloom_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_token_classification_model(*config_and_inputs) def test_bloom_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_bloom_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs) @unittest.skip("Bloom has a non-standard KV cache format.") def test_past_key_values_format(self): pass @slow def test_model_from_pretrained(self): for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BloomModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_simple_generation(self): path_560m = "bigscience/bloom-560m" model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").to(torch_device) model = model.eval() tokenizer = BloomTokenizerFast.from_pretrained(path_560m) input_sentence = "I enjoy walking with my cute dog" EXPECTED_OUTPUT = ( "I enjoy walking with my cute dog, and I love to watch the kids play with the kids. I am a very " "active person, and I enjoy working out, and I am a very active person. 
I am a very active person, and I" ) input_ids = tokenizer.encode(input_sentence, return_tensors="pt") greedy_output = model.generate(input_ids.to(torch_device), max_length=50) self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT) @slow @require_torch_accelerator def test_batch_generation(self): path_560m = "bigscience/bloom-560m" model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").to(torch_device) model = model.eval() tokenizer = BloomTokenizerFast.from_pretrained(path_560m, padding_side="left") input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"] inputs = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) attention_mask = inputs["attention_mask"] greedy_output = model.generate(input_ids, attention_mask=attention_mask, max_length=50, do_sample=False) self.assertEqual( tokenizer.decode(greedy_output[0], skip_special_tokens=True), tokenizer.decode(greedy_output[1], skip_special_tokens=True), ) @slow @require_torch_accelerator def test_batch_generation_padd(self): path_560m = "bigscience/bloom-560m" model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").to(torch_device) model = model.eval() tokenizer = BloomTokenizerFast.from_pretrained(path_560m, padding_side="left") input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"] input_sentence_without_pad = "Hello my name is" input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True) input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors="pt") input_ids, attention_mask = input_ids["input_ids"].to(torch_device), input_ids["attention_mask"] greedy_output = model.generate(input_ids, attention_mask=attention_mask, max_length=50, do_sample=False) greedy_output_without_pad = model.generate( input_ids_without_pad.to(torch_device), max_length=50, do_sample=False ) self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist()) self.assertEqual( tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True), tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True), ) @slow @require_torch_accelerator def test_batch_generated_text(self): path_560m = "bigscience/bloom-560m" model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").to(torch_device) model = model.eval() tokenizer = BloomTokenizerFast.from_pretrained(path_560m, padding_side="left") input_sentences = [ "Hello what is", "Running a quick test with the", ] inputs = tokenizer(input_sentences, return_tensors="pt", padding=True, truncation=True) generated_ids = model.generate( inputs["input_ids"].to(torch_device), attention_mask=inputs["attention_mask"], max_length=20 ) generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_GENERATIONS = [ "Hello what is the best way to get the data from the server? 
I have tried", "Running a quick test with the following command:\nsudo apt-get install python3\nsudo apt-get install python2", ] self.assertListEqual(generated_text, EXPECTED_GENERATIONS) @require_torch class BloomEmbeddingTest(unittest.TestCase): def setUp(self): super().setUp() self.path_bigscience_model = "bigscience/bigscience-small-testing" @require_torch def test_embeddings(self): model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto") model.eval() EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = { 3478: 0.0002307891845703125, 368: -0.000568389892578125, 109586: -0.0003910064697265625, 35433: -0.000194549560546875, 2: 0.0004138946533203125, 77: 0.000659942626953125, 132619: -0.00031280517578125, 2175: 0.000457763671875, 23714: 0.000263214111328125, 73173: -0.000286102294921875, 144252: 0.00052642822265625, } EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = { 3478: -0.00921630859375, 368: -0.010009765625, 109586: -0.01031494140625, 35433: -0.01177978515625, 2: -0.0074462890625, 77: -0.00848388671875, 132619: -0.009521484375, 2175: -0.0074462890625, 23714: -0.0145263671875, 73173: -0.007415771484375, 144252: -0.01007080078125, } EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = { 3478: 0.0128173828125, 368: 0.01214599609375, 109586: 0.0111083984375, 35433: 0.01019287109375, 2: 0.0157470703125, 77: 0.0174560546875, 132619: 0.0078125, 2175: 0.0113525390625, 23714: 0.0146484375, 73173: 0.01116943359375, 144252: 0.01141357421875, } EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125} EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = { 132619: -0.00031256675720214844, 3478: 0.00023090839385986328, 368: -0.0005702972412109375, 109586: -0.00039124488830566406, 35433: -0.000194549560546875, 2: 0.0004146099090576172, 2175: 0.0004572868347167969, 23714: 0.00026416778564453125, 73173: -0.0002865791320800781, 144252: 0.0005254745483398438, 77: 0.0006618499755859375, } EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = { 3478: -0.00921630859375, 368: -0.010009765625, 109586: -0.01031494140625, 35433: -0.01177978515625, 2: -0.0074462890625, 77: -0.00848388671875, 132619: -0.009521484375, 2175: -0.0074462890625, 23714: -0.0145263671875, 73173: -0.007415771484375, 144252: -0.01007080078125, } EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = { 3478: 0.0128173828125, 368: 0.01214599609375, 109586: 0.0111083984375, 35433: 0.01019287109375, 2: 0.0157470703125, 77: 0.0174560546875, 132619: 0.0078125, 2175: 0.0113525390625, 23714: 0.0146484375, 73173: 0.01116943359375, 144252: 0.01141357421875, } EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125} EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = { 132619: -0.00031267106533050537, 3478: 0.00023087859153747559, 368: -0.0005701072514057159, 109586: -0.0003911703824996948, 35433: -0.0001944899559020996, 2: 0.0004146844148635864, 2175: 0.00045740045607089996, 23714: 0.0002641640603542328, 73173: -0.0002864748239517212, 144252: 0.0005256589502096176, 77: 0.0006617321632802486, } EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = { 3478: -0.00921630859375, 368: -0.010009765625, 109586: -0.01031494140625, 35433: -0.01177978515625, 2: -0.0074462890625, 77: -0.00848388671875, 132619: -0.009521484375, 2175: -0.0074462890625, 23714: -0.0145263671875, 73173: -0.007415771484375, 144252: -0.01007080078125, } EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = { 3478: 0.0128173828125, 368: 0.01214599609375, 109586: 0.0111083984375, 35433: 0.01019287109375, 2: 0.0157470703125, 77: 0.0174560546875, 132619: 0.0078125, 2175: 0.0113525390625, 23714: 0.0146484375, 73173: 0.01116943359375, 144252: 0.01141357421875, } EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = 
{"value": 0.08217757940292358} TEST_EMBEDDINGS = { "torch.bfloat16": { "mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN, "max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX, "min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN, "sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM, }, "torch.float32": { "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN, "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX, "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN, "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM, }, "torch.float": { "mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN, "max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX, "min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN, "sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM, }, "torch.float16": { "mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN, "max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX, "min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN, "sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM, }, } EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] EMBEDDINGS_DS_AFTER_LN_MEAN = { 3478: -6.580352783203125e-05, 368: 0.0001316070556640625, 109586: -0.00030517578125, 35433: 4.00543212890625e-05, 2: -7.2479248046875e-05, 77: -8.96453857421875e-05, 132619: 0.0001583099365234375, 2175: 2.1219253540039062e-05, 23714: -0.000247955322265625, 73173: -0.00021839141845703125, 144252: -0.0001430511474609375, } EMBEDDINGS_DS_AFTER_LN_MIN = { 3478: -1.6953125, 368: -1.6875, 109586: -1.6875, 35433: -2.125, 2: -1.390625, 77: -1.5390625, 132619: -1.875, 2175: -1.4609375, 23714: -2.296875, 73173: -1.3515625, 144252: -1.78125, } EMBEDDINGS_DS_AFTER_LN_MAX = { 3478: 2.265625, 368: 2.28125, 109586: 1.953125, 35433: 1.90625, 2: 2.703125, 77: 2.828125, 132619: 1.65625, 2175: 2.015625, 23714: 2.234375, 73173: 2.171875, 144252: 1.828125, } EMBEDDINGS_DS_AFTER_LN = { "mean": EMBEDDINGS_DS_AFTER_LN_MEAN, "min": EMBEDDINGS_DS_AFTER_LN_MIN, "max": EMBEDDINGS_DS_AFTER_LN_MAX, } tensor_ids = torch.LongTensor([EXAMPLE_IDS]) with torch.no_grad(): embeddings = model.transformer.word_embeddings(tensor_ids) embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings) output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}} for i, idx in enumerate(EXAMPLE_IDS): output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item() output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item() output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item() for key in TEST_EMBEDDINGS[str(model.dtype)].keys(): self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key]) output_dict_norm = {"min": {}, "max": {}, "mean": {}} for i, idx in enumerate(EXAMPLE_IDS): output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item() output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item() output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item() for i, key in enumerate(output_dict_norm.keys()): for j, idx in enumerate(output_dict[key].keys()): self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1) @require_torch def test_hidden_states_transformers(self): cuda_available = torch.cuda.is_available() model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to( torch_device ) model.eval() EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] MEAN_VALUE_LAST_LM = -4.3392181396484375e-05 MIN_MAX_DICT = {"min": -2.0625, "max": 2.75} tensor_ids = torch.LongTensor([EXAMPLE_IDS]) with 
torch.no_grad(): logits = model(tensor_ids.to(torch_device)) output_dict = { "min": logits.last_hidden_state.min(dim=-1).values[0][0].item(), "max": logits.last_hidden_state.max(dim=-1).values[0][0].item(), } if cuda_available: self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4) else: self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3) self.assertDictEqual(MIN_MAX_DICT, output_dict) @require_torch def test_logits(self): cuda_available = torch.cuda.is_available() model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to( torch_device ) model.eval() EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478] MEAN_LOGITS_GPU_1 = -1.823902130126953e-05 MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05 tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device) with torch.no_grad(): output = model(tensor_ids).logits output_gpu_1, output_gpu_2 = output.split(125440, dim=-1) if cuda_available: self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6) else: self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
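The cache checks above all follow one pattern; here is a self-contained sketch of it (a hedged illustration on a tiny randomly initialized Bloom model with illustrative sizes, not the tests themselves):

import torch
from transformers import BloomConfig, BloomModel

config = BloomConfig(vocab_size=99, hidden_size=32, n_layer=2, n_head=4)
model = BloomModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))

# full forward over the concatenated sequence
full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state

# incremental forward reusing the cache from the first pass
past = model(input_ids, use_cache=True).past_key_values
incremental = model(next_tokens, past_key_values=past).last_hidden_state

# the last three positions must agree up to numerical tolerance
print(torch.allclose(full[:, -3:], incremental, atol=1e-3))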
# coding=utf-8
# Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0.
# Testing suite for the Flax Bloom model. The slow tests often fail with an OOM error on GPU;
# the allocator setting below makes JAX allocate exactly what is needed on demand and
# deallocate memory that is no longer needed, but will be slower, as stated in
# https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html.
# test_model_batched_gen tests that the model outputs the same generation for the same batched
# input; test_model_batched_padding_left tests that it outputs the same generation for an input
# that is part of a batch and for the same single input; test_batch_generated_text checks that
# these generations match those of the PyTorch model, ensuring correctness.
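A minimal sketch (standalone, not the test module itself) of the allocator configuration described above; the environment variable must be set before JAX first touches the GPU:

import os

# "platform" allocates on demand instead of pre-allocating most of GPU memory (slower, but avoids OOM)
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

import jax
import jax.numpy as jnp

x = jnp.ones((2, 2))
print(jax.devices(), x.sum())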
import unittest import numpy as np from transformers import BloomConfig, BloomTokenizerFast, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax.numpy as jnp from transformers import FlaxBloomForCausalLM, FlaxBloomModel def prepare_bloom_inputs_dict(config, input_ids, attention_mask=None): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_flax class FlaxBloomModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, n_layer=2, n_head=4, hidden_act="gelu", hidden_dropout=0.1, attention_probs_dropout_prob=0.1, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, apply_residual_connection_post_layernorm=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = n_layer self.num_attention_heads = n_head self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_probs_dropout_prob = attention_probs_dropout_prob self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range self.is_encoder_decoder = False self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) config = BloomConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_probs_dropout_prob, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=False, use_cache=False, ) inputs_dict = prepare_bloom_inputs_dict(config, input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_length = 20 model = model_class_name(config) input_ids = inputs_dict["input_ids"] attention_mask = jnp.ones((input_ids.shape[0], max_length), dtype="i4") past_key_values = model.init_cache(input_ids.shape[0], max_length) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, ) outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_length = 20 model = model_class_name(config) input_ids, attention_mask = ( inputs_dict["input_ids"], inputs_dict["attention_mask"], ) attention_mask_cache = 
jnp.concatenate( [ attention_mask, jnp.zeros((attention_mask.shape[0], max_length - attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_length) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxBloomModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): all_model_classes = (FlaxBloomModel, FlaxBloomForCausalLM) if is_flax_available() else () all_generative_model_classes = () if is_flax_available() else () def setUp(self): self.model_tester = FlaxBloomModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("bigscience/bloom-560m") input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @slow @require_flax class FlaxBloomGenerationTest(unittest.TestCase): all_model_classes = (FlaxBloomForCausalLM,) if is_flax_available() else () all_generative_model_classes = () if is_flax_available() else () def setUp(self): self.model_id = "bigscience/bloom-560m" self.tokenizer = BloomTokenizerFast.from_pretrained(self.model_id, padding_side="left") self.model_tester = FlaxBloomModelTester(self) self.model = FlaxBloomForCausalLM.from_pretrained(self.model_id, from_pt=True, revision="gs555750") def test_model_batched_gen(self): input_sentences = [ "Hello there is this string is definitely longer I believe that", "Hello there is this string is definitely longer I believe that", ] inputs = self.tokenizer(input_sentences, return_tensors="np", padding=True, truncation=True) sequences_fx = self.model.generate(**inputs, max_length=20).sequences self.assertEqual(sequences_fx[0].tolist(), sequences_fx[1].tolist()) def test_model_batched_padding_left(self): input_sentences_batch = [ "Hello there is this string is definitely longer I believe that", "Hi I want to order", ] inputs = self.tokenizer(input_sentences_batch, return_tensors="np", padding=True, truncation=True) sequences_fx_batch = self.model.generate(**inputs, max_length=20).sequences input_sentence_simple = "Hi I want to order" inputs_simple = self.tokenizer(input_sentence_simple, return_tensors="np") sequences_fx_simple = self.model.generate(**inputs_simple, max_length=20).sequences self.assertEqual(sequences_fx_batch[1][6:].tolist(), sequences_fx_simple[0][:-6].tolist()) def test_batch_generated_text(self): input_sentences = [ "Hello what is", "Running a quick test with the", ] inputs = self.tokenizer(input_sentences, return_tensors="np", padding=True, truncation=True) generated_ids = self.model.generate(**inputs, max_length=20).sequences generated_text = 
self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_GENERATIONS = [ "Hello what is the best way to get the data from the server? I have tried", "Running a quick test with the following command:\nsudo apt-get install python3\nsudo apt-get install python2", ] self.assertListEqual(generated_text, EXPECTED_GENERATIONS)
# coding=utf-8
# Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0.
# Testing suite for the Bloom tokenizer, downloaded from https://huggingface.co/bigscience/tokenizer.
# test_encodings_from_sample_data asserts that the created tokens are the same as the
# hard-coded ones. test_padding hotfixes tokenizer_r.pad_token = None and checks that simple
# and pair inputs then fail to pad. test_encodings_from_xnli_dataset picks up one sample of the
# dataset. test_pretrained_model_lists has to be overridden because Bloom uses ALiBi positional
# embeddings, which do not have any sequence-length constraint; the parent-class test would
# fail since it relies on the maximum sequence length of the positional embeddings.
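A small sketch of the padding behavior test_padding exercises (standalone; the checkpoint is the one the tests use): once the pad token is removed, asking a fast tokenizer to pad raises:

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tok.pad_token = None  # the test "hotfixes" the pad token away

try:
    tok("This is a simple input", max_length=6, padding="max_length")
except ValueError as err:
    print("padding without a pad token raises:", err)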
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_jinja, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase): slow_tokenizer_class = None rust_tokenizer_class = BloomTokenizerFast tokenizer_class = BloomTokenizerFast test_rust_tokenizer = True test_slow_tokenizer = False from_pretrained_vocab_key = "tokenizer_file" special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def setUp(self): super().setUp() tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer") tokenizer.save_pretrained(self.tmpdirname) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) @unittest.skip("This needs a slow tokenizer. Bloom does not have one!") def test_encode_decode_with_spaces(self): return def test_encodings_from_sample_data(self): tokenizer = self.get_rust_tokenizer() INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"] TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"] self.assertListEqual(TARGET_TOKENS, computed_tokens) decoded_tokens = tokenizer.batch_decode(computed_tokens) self.assertListEqual(decoded_tokens, INPUT_SENTENCES) def test_padding(self, max_length=6): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] try: tokenizer_r.encode(s, max_length=max_length) tokenizer_r.encode_plus(s, max_length=max_length) tokenizer_r.batch_encode_plus(s2, max_length=max_length) tokenizer_r.encode(p, max_length=max_length) tokenizer_r.batch_encode_plus(p2, max_length=max_length) except ValueError: self.fail("Bloom Tokenizer should be able to deal with padding") tokenizer_r.pad_token = None self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) def test_encodings_from_xnli_dataset(self): tokenizer = self.get_rust_tokenizer() ds = load_dataset("xnli", "all_languages", split="test", streaming=True) sample_data = next(iter(ds))["premise"] input_text = list(sample_data.values()) output_tokens = list(map(tokenizer.encode, input_text)) predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens] self.assertListEqual(predicted_text, input_text) def test_pretrained_model_lists(self): 
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) @require_jinja def test_tokenization_for_chat(self): tokenizer = self.get_rust_tokenizer() test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [5448, 1306, 267, 66799, 44799, 37143, 17, 2, 59414, 4, 2], [5448, 1306, 267, 66799, 44799, 37143, 17, 2, 59414, 4, 2, 229126, 427, 11890, 1152, 17, 2], [229126, 427, 11890, 1152, 17, 2, 59414, 4, 2], ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens) def test_add_prefix_space_fast(self): tokenizer_w_prefix = self.get_rust_tokenizer(add_prefix_space=True) tokenizer_wo_prefix = self.get_rust_tokenizer(add_prefix_space=False) tokens_w_prefix = tokenizer_w_prefix.tokenize("Hey") tokens_wo_prefix = tokenizer_wo_prefix.tokenize("Hey") self.assertNotEqual(tokens_w_prefix, tokens_wo_prefix)
# coding=utf-8
# Copyright 2023 The Intel Labs Team Authors, the Microsoft Research Team Authors and the
# HuggingFace Inc. team. Licensed under the Apache License, Version 2.0.
# Testing suite for the BridgeTower image processor. get_expected_values computes the expected
# height and width when providing images to BridgeTowerImageProcessor, assuming do_resize is
# set to True with a scalar size and size_divisor.
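A worked numeric sketch (standalone; it mirrors the helper in the tester below rather than calling the library, and the sample dimensions are illustrative) of the shortest-edge resize, long-edge cap, and size_divisor flooring:

def expected_hw(h, w, size=288, size_divisor=32):
    # scale the shorter edge to `size`
    scale = size / min(w, h)
    if h < w:
        newh, neww = size, scale * w
    else:
        newh, neww = scale * h, size
    # cap the longer edge at (1333 / 800) * size
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # floor both edges to multiples of size_divisor
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_hw(30, 400))  # -> (32, 448): 30x400 scales to 288x3840, the cap rescales to 36x479, flooring gives 32x448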
import unittest
from typing import Dict, List, Optional, Union

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
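For reference, a minimal dependency-free sketch of the expected-size computation that get_expected_values above implements (the helper name is made up for illustration; the 288 shortest edge, the 1333/800 cap, and the divisor-of-32 flooring come from the tester's defaults):

def expected_resized_shape(h: int, w: int, shortest_edge: int = 288, size_divisor: int = 32):
    # Scale so the shorter side matches `shortest_edge`.
    scale = shortest_edge / min(w, h)
    newh, neww = (shortest_edge, scale * w) if h < w else (scale * h, shortest_edge)
    # Cap the longer side at (1333 / 800) * shortest_edge, as the tester does.
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Round down to the nearest multiple of `size_divisor`.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resized_shape(200, 300))  # (288, 416)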
coding=utf-8. Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Testing suite for the PyTorch Bros model. Ensure that bbox is legal. Bros requires bbox in the inputs, which doesn't fit into the above two pipelines' input formats; see https://github.com/huggingface/transformers/pull/26294. Verify the logits.
import copy
import unittest

from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BrosConfig,
        BrosForTokenClassification,
        BrosModel,
        BrosSpadeEEForTokenClassification,
        BrosSpadeELForTokenClassification,
    )
    from transformers.models.bros.modeling_bros import (
        BROS_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class BrosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_bbox_first_token_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_bbox_first_token_mask = use_bbox_first_token_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 8], 1)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        bbox_first_token_mask = None
        if self.use_bbox_first_token_mask:
            bbox_first_token_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.bool).to(torch_device)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            initial_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            subsequent_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            bbox_first_token_mask,
            token_labels,
            initial_token_labels,
            subsequent_token_labels,
        )

    def get_config(self):
        return BrosConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        bbox_first_token_mask,
        token_labels,
        initial_token_labels,
        subsequent_token_labels,
    ):
        model = BrosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        bbox_first_token_mask,
        token_labels,
        initial_token_labels,
        subsequent_token_labels,
    ):
        config.num_labels = self.num_labels
        model = BrosForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_spade_ee_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        bbox_first_token_mask,
        token_labels,
        initial_token_labels,
        subsequent_token_labels,
    ):
        config.num_labels = self.num_labels
        model = BrosSpadeEEForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            bbox_first_token_mask=bbox_first_token_mask,
            token_type_ids=token_type_ids,
            initial_token_labels=token_labels,
            subsequent_token_labels=token_labels,
        )
        self.parent.assertEqual(result.initial_token_logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        self.parent.assertEqual(
            result.subsequent_token_logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1)
        )

    def create_and_check_for_spade_el_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        bbox_first_token_mask,
        token_labels,
        initial_token_labels,
        subsequent_token_labels,
    ):
        config.num_labels = self.num_labels
        model = BrosSpadeELForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            bbox_first_token_mask=bbox_first_token_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            bbox_first_token_mask,
            token_labels,
            initial_token_labels,
            subsequent_token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class BrosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            BrosForTokenClassification,
            BrosSpadeEEForTokenClassification,
            BrosSpadeELForTokenClassification,
            BrosModel,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = () if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BrosModel, "token-classification": BrosForTokenClassification}
        if is_torch_available()
        else {}
    )

    # Bros requires `bbox` in the inputs, which doesn't fit into the above two pipelines' input formats.
    # See https://github.com/huggingface/transformers/pull/26294
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = BrosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BrosConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class.__name__ in ["BrosForTokenClassification", "BrosSpadeELForTokenClassification"]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["bbox_first_token_mask"] = torch.ones(
                    [self.model_tester.batch_size, self.model_tester.seq_length],
                    dtype=torch.bool,
                    device=torch_device,
                )
            elif model_class.__name__ in ["BrosSpadeEEForTokenClassification"]:
                inputs_dict["initial_token_labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["subsequent_token_labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["bbox_first_token_mask"] = torch.ones(
                    [self.model_tester.batch_size, self.model_tester.seq_length],
                    dtype=torch.bool,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_multi_gpu_data_parallel_forward(self):
        super().test_multi_gpu_data_parallel_forward()

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_spade_ee_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_spade_ee_token_classification(*config_and_inputs)

    def test_for_spade_el_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_spade_el_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BROS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BrosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_bros_batch_inputs():
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

    bbox = torch.tensor(
        [
            [
                [0.0000, 0.0000, 0.0000, 0.0000],
                [0.5223, 0.5590, 0.5787, 0.5720],
                [0.5853, 0.5590, 0.6864, 0.5720],
                [0.5853, 0.5590, 0.6864, 0.5720],
                [0.1234, 0.5700, 0.2192, 0.5840],
                [0.2231, 0.5680, 0.2782, 0.5780],
                [0.2874, 0.5670, 0.3333, 0.5780],
                [0.3425, 0.5640, 0.4344, 0.5750],
                [0.0866, 0.7770, 0.1181, 0.7870],
                [0.1168, 0.7770, 0.1522, 0.7850],
                [0.1535, 0.7750, 0.1864, 0.7850],
                [0.1890, 0.7750, 0.2572, 0.7850],
                [1.0000, 1.0000, 1.0000, 1.0000],
            ],
            [
                [0.0000, 0.0000, 0.0000, 0.0000],
                [0.4396, 0.6720, 0.4659, 0.6850],
                [0.4698, 0.6720, 0.4843, 0.6850],
                [0.1575, 0.6870, 0.2021, 0.6980],
                [0.2047, 0.6870, 0.2730, 0.7000],
                [0.1299, 0.7010, 0.1430, 0.7140],
                [0.1299, 0.7010, 0.1430, 0.7140],
                [0.1562, 0.7010, 0.2441, 0.7120],
                [0.1562, 0.7010, 0.2441, 0.7120],
                [0.2454, 0.7010, 0.3150, 0.7120],
                [0.3176, 0.7010, 0.3320, 0.7110],
                [0.3333, 0.7000, 0.4029, 0.7140],
                [1.0000, 1.0000, 1.0000, 1.0000],
            ],
        ]
    )
    input_ids = torch.tensor(
        [
            [101, 1055, 8910, 1012, 5719, 3296, 5366, 3378, 2146, 2846, 10807, 13494, 102],
            [101, 2112, 1997, 3671, 6364, 1019, 1012, 5057, 1011, 4646, 2030, 2974, 102],
        ]
    )

    return input_ids, bbox, attention_mask


@require_torch
class BrosModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = BrosModel.from_pretrained("jinho8345/bros-base-uncased").to(torch_device)

        input_ids, bbox, attention_mask = prepare_bros_batch_inputs()

        with torch.no_grad():
            outputs = model(
                input_ids.to(torch_device),
                bbox.to(torch_device),
                attention_mask=attention_mask.to(torch_device),
                return_dict=True,
            )

        # verify the logits
        expected_shape = torch.Size((2, 13, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.3074, 0.1363, 0.3143], [0.0925, -0.1155, 0.1050], [0.0221, 0.0003, 0.1285]]
        ).to(torch_device)

        torch.set_printoptions(sci_mode=False)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
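The nested Python loops that legalize bbox in prepare_config_and_inputs above visit every element; a minimal vectorized sketch of the same invariant (x0 <= x1 and y0 <= y1 on the first four coordinates), not part of the test suite:

import torch

def legalize_bbox(bbox: torch.Tensor) -> torch.Tensor:
    # Mirrors what the loop enforces on coordinates (x0, y0, x1, y1):
    # sort each coordinate pair so that x0 <= x1 and y0 <= y1 everywhere.
    out = bbox.clone()
    out[..., 0] = torch.minimum(bbox[..., 0], bbox[..., 2])
    out[..., 2] = torch.maximum(bbox[..., 0], bbox[..., 2])
    out[..., 1] = torch.minimum(bbox[..., 1], bbox[..., 3])
    out[..., 3] = torch.maximum(bbox[..., 1], bbox[..., 3])
    return out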
coding=utf-8. Copyright 2020 Google T5 Authors and HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

XXX: The default common tokenizer tests assume that every ID is decodable on its own. This assumption is invalid for ByT5 because single bytes might not be valid UTF-8 (byte 128, for instance). Here we're overriding the smallest possible method to provide a clean sequence without making the same assumption (toks_str = [t[1] for t in toks]). Ensure consistency of decoding; encode/decode, but with encode instead of __call__. Check that input_ids are returned and no decoder_input_ids. Cannot use the default save_and_load_tokenizer test method because the tokenizer has no vocab. Safety check on the max_len default value so we are sure the test works. Now let's start the test; isolate this from the other tests because we save additional tokens, etc. There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens: we need to add the extra_ids in the list of the arg additional_special_tokens. The following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes into account the new value of additional_special_tokens given in the tokenizer_config.json and special_tokens_map.json files (self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) is skipped: ByT5Tokenization has no vocab). Now we test that we can change the value of additional_special_tokens in from_pretrained. The tokenizer can be instantiated without any pretrained files, so there is no need for a pretrained tokenizer list. The tokenizer does not have a vocabulary. Inputs cannot be pretokenized, since IDs depend on the whole input string and not just on single characters. "Tests all IDs in vocab" is unnecessary to test since the vocab doesn't exist. The default common tokenizer tests use invalid tokens for ByT5, which can only accept one-character strings and special added tokens as tokens. We need a different implementation of the test of the same name defined in TokenizerTesterMixin, because this tokenizer doesn't have a vocab.
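The byte-128 point is easy to demonstrate in plain Python: a lone UTF-8 continuation byte cannot be decoded on its own, which is exactly why per-ID decoding can fail for a byte-level tokenizer:

# 0x80 (128) is a UTF-8 continuation byte and cannot start a character.
try:
    bytes([128]).decode("utf-8")
except UnicodeDecodeError as err:
    print(err)  # 'utf-8' codec can't decode byte 0x80 in position 0: invalid start byte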
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX: The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be valid UTF-8
        # (byte 128 for instance). Here we're overriding the smallest possible method to
        # provide a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # skip ids that do not decode to valid UTF-8 on their own
                continue
            toks.append((i, tok))
        # toks_str = [t[1] for t in toks]
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        toks_ids = [t[0] for t in toks]
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]  # fmt: skip
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]  # fmt: skip
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]  # fmt: skip

        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens(
                    {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False
                )
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    # There is a conflict between the default value of extra_ids and adding a new special token
    # through additional_special_tokens. We need to add the extra_ids in the list of the arg
    # additional_special_tokens.
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the
                # tokenizer takes into account the new value of additional_special_tokens given in the
                # tokenizer_config.json and special_tokens_map.json files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())  # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

"J'aime le camembert !" (the test input sentence). Compare the actual values for a slice: camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0'); camembert.eval(); expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach().
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    import torch

    from transformers import CamembertModel


@require_torch
@require_sentencepiece
@require_tokenizers
class CamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = CamembertModel.from_pretrained("camembert-base")
        model.to(torch_device)

        input_ids = torch.tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            device=torch_device,
            dtype=torch.long,
        )  # J'aime le camembert !
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"]
        expected_shape = torch.Size((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            device=torch_device,
            dtype=torch.float,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
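A sketch of how the reference slice in the comment above was produced, following that comment; it requires fairseq and torch.hub access, and the hub entry name "camembert.v0" is taken on trust from the comment rather than verified:

import torch

# Load the original fairseq CamemBERT and recompute the reference activations
# that the integration test's expected_slice was taken from.
camembert = torch.hub.load("pytorch/fairseq", "camembert.v0")
camembert.eval()  # disable dropout

input_ids = torch.tensor([[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]])  # "J'aime le camembert !"
expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
print(expected_slice)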
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

"J'aime le camembert !" (the test input sentence). Compare the actual values for a slice: camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0'); camembert.eval(); expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach().
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
coding=utf-8. Copyright 2018 HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

We have a SentencePiece fixture for testing. Test _convert_token_to_id and _convert_id_to_token: token "<pad>" has token_id 1 (1 is the offset ID, but in the SPM vocab it's 3). <unk> tokens are not the same for Rust as for slow, because SPM gives back the raw token instead of <unk> in EncodeAsPieces. CamemBERT is a French model, so we also use French texts. test_added_tokens_serialization is overwritten because we have to use from_slow (the online pretrained is wrong: the tokenizer.json has a hole). A utility tests the added vocab. Load a slow tokenizer from the hub, and init with the new token for fast to also include it. We can't test that slow and fast added_tokens_decoder match, because for backward compatibility we kept the default rstrip/lstrip in slow but not fast; will uncomment once normalization is alright.
import tempfile import unittest from transformers import AddedToken, CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model") FRAMEWORK = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = CamembertTokenizer rust_tokenizer_class = CamembertTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = CamembertTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) @unittest.skip( "Token maps are not equal because someone set the probability of ('<unk>NOTUSED', -100), so it's never encoded for fast" ) def test_special_tokens_map_equal(self): return def test_convert_token_and_id(self): token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer().convert_tokens_to_ids(token), token_id) self.assertEqual(self.get_tokenizer().convert_ids_to_tokens(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>NOTUSED") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "<mask>") self.assertEqual(len(vocab_keys), 1_005) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_rust_and_python_bpe_tokenizers(self): tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname) sequence = "I was born in 92000, and this is falsé." ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) tokens = tokenizer.convert_ids_to_tokens(ids) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} sequences = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, ) def test_added_tokens_serialization(self): self.maxDiff = None def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir): tokenizer = tokenizer_class.from_pretrained(temp_dir) self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens) self.assertIn(new_eos, tokenizer.added_tokens_decoder.values()) self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos) self.assertDictEqual(expected, tokenizer.added_tokens_decoder) return tokenizer new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos) EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"): self.assertEqual(tokenizer._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values())) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) with self.subTest( "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2 ) if self.rust_tokenizer_class is not None: with self.subTest( "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class" ): tokenizer_fast = _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2 ) with 
tempfile.TemporaryDirectory() as tmp_dir_3:
                                tokenizer_fast.save_pretrained(tmp_dir_3)
                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
                                    )
                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_3
                                    )

                with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub"):
                    if self.rust_tokenizer_class is not None:
                        tokenizer_fast = self.rust_tokenizer_class.from_pretrained(
                            pretrained_name, eos_token=new_eos, from_slow=True
                        )
                        self.assertEqual(tokenizer_fast._eos_token, new_eos)
                        self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
                        with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
                            self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)

                        EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
                        with tempfile.TemporaryDirectory() as tmp_dir_4:
                            tokenizer_fast.save_pretrained(tmp_dir_4)
                            with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4
                                )
                            with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4
                                )
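# --- Illustrative sketch (not part of the test suite) ---
# A minimal, hedged example of the round-trip exercised by
# test_added_tokens_serialization above: a custom `eos_token` passed at load time
# should survive save_pretrained()/from_pretrained(). The helper name is made up
# for illustration; it assumes network access to the public "camembert-base"
# checkpoint already used elsewhere in this file.
def _added_eos_roundtrip_sketch():
    tokenizer = CamembertTokenizer.from_pretrained(
        "camembert-base", eos_token=AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False)
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)
        reloaded = CamembertTokenizer.from_pretrained(tmp_dir)
        # the custom token should still be registered as the EOS token after reloading
        assert reloaded.eos_token == "[NEW_EOS]"
        assert str(reloaded.added_tokens_decoder[reloaded.eos_token_id]) == "[NEW_EOS]"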
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch CANINE model."""
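# --- Illustrative note (not part of the test suite) ---
# CANINE is tokenization-free: it consumes raw Unicode code points instead of
# subword ids. That is why the model tester below uses a "vocab size" of 100000
# (a bound on the inputs, not a model parameter) and why the integration test at
# the bottom of this file feeds long hand-written lists of code points, e.g.:
#
#     input_ids = [ord(c) for c in "hello"]  # -> [104, 101, 108, 108, 111]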
import unittest from typing import List, Tuple from transformers import CanineConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, global_rng, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.canine.modeling_canine import CANINE_PRETRAINED_MODEL_ARCHIVE_LIST class CanineModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=100000, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, num_hash_buckets=16, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.num_hash_buckets = num_hash_buckets self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor(input_ids.shape, self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return CanineConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, num_hash_buckets=self.num_hash_buckets, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = CanineModel(config=config) 
model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = CanineForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = CanineForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = CanineForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = CanineForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( CanineModel, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": CanineModel, "question-answering": CanineForQuestionAnswering, "text-classification": CanineForSequenceClassification, "token-classification": CanineForTokenClassification, "zero-shot": CanineForSequenceClassification, } if is_torch_available() else {} ) 
    test_mismatched_shapes = False
    test_resize_embeddings = False
    test_pruning = False

    def setUp(self):
        self.model_tester = CanineModelTester(self)
        # we set has_text_modality to False as the config has no vocab_size attribute
        self.config_tester = ConfigTester(self, config_class=CanineConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # expected_num_layers equals num_hidden_layers of the deep encoder + 1,
            # + 2 for the first shallow encoder, + 2 for the final shallow encoder
            expected_num_layers = self.model_tester.num_hidden_layers + 1 + 2 + 2
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            for i in range(expected_num_layers):
                if (i < 2) or ((expected_num_layers - i) < 3):
                    # the expected length of the hidden_states of the first and final shallow encoders
                    # is equal to the seq_length
                    self.assertListEqual(
                        list(hidden_states[i].shape[-2:]),
                        [seq_length, self.model_tester.hidden_size],
                    )
                else:
                    # the expected length of the hidden_states of the deep encoder needs to be updated
                    # for CANINE, since the seq length is downsampled
                    self.assertListEqual(
                        list(hidden_states[i].shape[-2:]),
                        [seq_length // config.downsampling_rate, self.model_tester.hidden_size],
                    )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # we add 2 due to the 2 shallow encoders
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # we add 2 due to the 2 shallow encoders
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers + 2)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # zero out NaNs so that torch.allclose can compare tensors that may legitimately contain them
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        # compare the tuple (return_dict=False) and dict (return_dict=True) outputs element-wise
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_headmasking(self):
        if not self.test_head_masking:
return global_rng.seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() global_rng.seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() head_mask = torch.ones( self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device, ) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask outputs = model(**inputs, return_dict=True) output = sum(t.sum() for t in outputs[0]) output = output.sum() output.backward() multihead_outputs = head_mask.grad self.assertIsNotNone(multihead_outputs) self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) def check_attentions_validity(attentions): for t in attentions: self.assertLess( torch.sum(torch.isnan(t)), t.numel() / 4 ) attentions = [ t.masked_fill(torch.isnan(t), 0.0) for t in attentions ] self.assertAlmostEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[1][..., -1, :, :].flatten().sum().item(), 0.0) self.assertAlmostEqual(attentions[-2][..., -2, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[-2][..., -1, :, :].flatten().sum().item(), 0.0) check_attentions_validity(outputs.attentions) @unittest.skip("CANINE does not have a get_input_embeddings() method.") def test_inputs_embeds(self): pass @unittest.skip("CANINE does not have a get_input_embeddings() method.") def test_model_common_attributes(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in CANINE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CanineModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class CanineModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = CanineModel.from_pretrained("google/canine-s") input_ids = [57344, 57349, 85, 107, 117, 98, 119, 97, 32, 119, 97, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 110, 105, 32, 107, 105, 97, 115, 105, 32, 103, 97, 110, 105, 63, 57345, 57350, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 44, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 97, 117, 32, 105, 110, 103, 46, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 112, 105, 97, 58, 32, 84, 111, 108, 105, 109, 97, 110, 32, 97, 117, 32, 82, 105, 103, 105, 108, 32, 75, 101, 110, 116, 97, 117, 114, 117, 115, 41, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 105, 110, 97, 121, 111, 110, 103, 39, 97, 97, 32, 115, 97, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 
103, 97, 32, 121, 97, 32, 107, 117, 115, 105, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 112, 105, 97, 58, 32, 105, 110, 103, 46, 32, 67, 101, 110, 116, 97, 117, 114, 117, 115, 41, 46, 32, 78, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 107, 117, 110, 103, 97, 97, 32, 115, 97, 110, 97, 32, 121, 97, 32, 110, 110, 101, 32, 97, 110, 103, 97, 110, 105, 32, 108, 97, 107, 105, 110, 105, 32, 104, 97, 105, 111, 110, 101, 107, 97, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 110, 117, 115, 117, 100, 117, 110, 105, 97, 32, 121, 97, 32, 107, 97, 115, 107, 97, 122, 105, 110, 105, 46, 32, 57351, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 112, 101, 107, 101, 101, 32, 107, 119, 97, 32, 115, 97, 98, 97, 98, 117, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 101, 116, 117, 32, 106, 105, 114, 97, 110, 105, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 103, 97, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 46, 32, 73, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 97, 110, 103, 97, 110, 105, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 83, 97, 108, 105, 98, 117, 32, 40, 67, 114, 117, 120, 41, 46, 32, 57352, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 41, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 97, 109, 97, 32, 110, 121, 111, 116, 97, 32, 109, 111, 106, 97, 32, 108, 97, 107, 105, 110, 105, 32, 107, 119, 97, 32, 100, 97, 114, 117, 98, 105, 110, 105, 32, 107, 117, 98, 119, 97, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 117, 119, 97, 32, 109, 102, 117, 109, 111, 32, 119, 97, 32, 110, 121, 111, 116, 97, 32, 116, 97, 116, 117, 32, 122, 105, 110, 97, 122, 111, 107, 97, 97, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 115, 104, 105, 107, 97, 109, 97, 110, 97, 32, 107, 97, 116, 105, 32, 121, 97, 111, 46, 32, 78, 121, 111, 116, 97, 32, 109, 97, 112, 97, 99, 104, 97, 32, 122, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 65, 32, 110, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 66, 32, 122, 105, 107, 111, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 51, 54, 32, 107, 117, 116, 111, 107, 97, 32, 107, 119, 101, 116, 117, 32, 110, 97, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 116, 97, 116, 117, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 67, 32, 97, 117, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 50, 46, 32, 57353, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 121, 97, 97, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 105, 108, 105, 121, 111, 32, 107, 97, 114, 105, 98, 117, 32, 122, 97, 105, 100, 105, 32, 110, 97, 115, 105, 41, 32, 105, 109, 101, 103, 117, 110, 100, 117, 108, 105, 119, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 115, 97, 121, 97, 114, 105, 32, 109, 111, 106, 97, 46, 32, 86, 105, 
112, 105, 109, 111, 32, 118, 105, 110, 97, 118, 121, 111, 112, 97, 116, 105, 107, 97, 110, 97, 32, 104, 97, 100, 105, 32, 115, 97, 115, 97, 32, 122, 105, 110, 97, 111, 110, 121, 101, 115, 104, 97, 32, 117, 119, 101, 122, 101, 107, 97, 110, 111, 32, 109, 107, 117, 98, 119, 97, 32, 121, 97, 32, 107, 119, 97, 109, 98, 97, 32, 115, 97, 121, 97, 114, 105, 32, 104, 105, 105, 32, 110, 105, 32, 121, 97, 32, 109, 119, 97, 109, 98, 97, 32, 40, 107, 97, 109, 97, 32, 100, 117, 110, 105, 97, 32, 121, 101, 116, 117, 44, 32, 77, 105, 114, 105, 104, 105, 32, 97, 117, 32, 90, 117, 104, 117, 114, 97, 41, 32, 110, 97, 32, 105, 110, 97, 119, 101, 122, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 97, 110, 103, 97, 104, 101, 119, 97, 44, 32, 116, 101, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 117, 112, 101, 111, 32, 119, 97, 32, 106, 111, 116, 111, 32, 117, 110, 97, 111, 114, 117, 104, 117, 115, 117, 32, 107, 117, 119, 101, 112, 111, 32, 107, 119, 97, 32, 117, 104, 97, 105, 46, 32, 91, 49, 93, 57345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] attention_mask = [1 if x != 0 else 0 for x in input_ids] token_type_ids = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] input_ids = torch.tensor([input_ids]) attention_mask = torch.tensor([attention_mask]) token_type_ids = torch.tensor([token_type_ids]) outputs = model(input_ids, attention_mask, token_type_ids) expected_shape = torch.Size((1, 2048, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [ [-0.161433131, 0.395568609, 0.0407391489], [-0.108025983, 0.362060368, -0.544592619], [-0.141537309, 0.180541009, 0.076907], ] ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-2)) expected_shape = torch.Size((1, 768)) self.assertEqual(outputs.pooler_output.shape, expected_shape) expected_slice = torch.tensor([-0.884311497, -0.529064834, 0.723164916]) 
self.assertTrue(torch.allclose(outputs.pooler_output[0, :3], expected_slice, atol=1e-2))
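# The integration test above uses the slice-comparison pattern common to these
# suites: run a full forward pass, then check a small, stable corner of the
# output against hard-coded reference values within an absolute tolerance,
# since exact full-tensor equality is too brittle across hardware and kernel
# versions. A minimal, self-contained sketch of that pattern (the shapes mirror
# the test above; the tensors here are placeholders, not real model outputs):

import torch

def check_output_slice(last_hidden_state: torch.Tensor, expected_slice: torch.Tensor, atol: float = 1e-2) -> bool:
    # Compare only the top-left 3x3 corner of the first batch element.
    return torch.allclose(last_hidden_state[0, :3, :3], expected_slice, atol=atol)

dummy_output = torch.zeros(1, 2048, 768)  # placeholder (batch, seq_len, hidden_size)
dummy_reference = torch.zeros(3, 3)       # placeholder expected 3x3 slice
assert check_output_slice(dummy_output, dummy_reference)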
# coding=utf-8
# Copyright 2021 Google AI and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Notes from the original inline comments of this test file:
# - CanineTokenizer has no vocabulary file: its vocab_size is fixed to the set
#   of all possible Unicode code points, so the default save_and_load_tokenizer,
#   test_tokenizers_common_ids_setters, get_vocab and pretokenized-input tests
#   cannot be used as-is and are overridden or skipped below.
# - Inputs cannot be pretokenized, since ids depend on the whole input string
#   and not just on single characters.
# - CanineTokenizer does not support do_lower_case=True, as each character has
#   its own Unicode code point ("b" and "B", for example, differ).
# - New special tokens for CANINE are defined as private-use-area characters,
#   e.g. chr(0xE005).
# - The additional_special_tokens round-trip tests are isolated from the other
#   tests because they save additional tokens, and they verify that the
#   tokenizer takes into account new values of additional_special_tokens given
#   in the tokenizer_config.json and special_tokens_map.json files.
# - CanineModel supports neither get_input_embeddings nor get_vocab, so the
#   encode-plus-sent-to-model tests are skipped.
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = CanineTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def canine_tokenizer(self): return CanineTokenizer.from_pretrained("google/canine-s") def get_tokenizer(self, **kwargs) -> CanineTokenizer: tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) tokenizer._unicode_vocab_size = 1024 return tokenizer @require_torch def test_prepare_batch_integration(self): tokenizer = self.canine_tokenizer src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."] expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) result = list(batch.input_ids.numpy()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 39), batch.input_ids.shape) self.assertEqual((2, 39), batch.attention_mask.shape) @require_torch def test_encoding_keys(self): tokenizer = self.canine_tokenizer src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertIn("token_type_ids", batch) @require_torch def test_max_length_integration(self): tokenizer = self.canine_tokenizer tgt_text = [ "What's the weater?", "It's about 25 degrees.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt" ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_save_and_load_tokenizer(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" additional_special_tokens = tokenizer.additional_special_tokens new_additional_special_token = chr(0xE007) additional_special_tokens.append(new_additional_special_token) tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, 
add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) def test_add_special_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, ids = self.get_clean_sequence(tokenizer) SPECIAL_TOKEN = 0xE005 special_token = chr(SPECIAL_TOKEN) tokenizer.add_special_tokens({"cls_token": special_token}) encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(len(encoded_special_token), 1) text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False) encoded = tokenizer.encode(text, add_special_tokens=False) input_encoded = tokenizer.encode(input_text, add_special_tokens=False) special_token_id = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(encoded, input_encoded + special_token_id) decoded = tokenizer.decode(encoded, skip_special_tokens=True) self.assertTrue(special_token not in decoded) def test_tokenize_special_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): SPECIAL_TOKEN_1 = chr(0xE005) SPECIAL_TOKEN_2 = chr(0xE006) tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]}) token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1) token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2) self.assertEqual(len(token_1), 1) self.assertEqual(len(token_2), 1) self.assertEqual(token_1[0], SPECIAL_TOKEN_1) self.assertEqual(token_2[0], SPECIAL_TOKEN_2) @require_tokenizers def test_added_token_serializable(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): NEW_TOKEN = 0xE006 new_token = chr(NEW_TOKEN) new_token = AddedToken(new_token, lstrip=True) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]}) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(tmp_dir_name) tokenizer.from_pretrained(tmp_dir_name) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) NEW_TOKEN = 0xE006 new_token_1 = chr(NEW_TOKEN) special_tokens_map["additional_special_tokens"] = [new_token_1] tokenizer_config["additional_special_tokens"] = [new_token_1] with open(os.path.join(tmp_dir, 
"special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0) self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens) self.assertEqual( [new_token_1], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1]) ), ) NEW_TOKEN = 0xE007 new_token_2 = chr(NEW_TOKEN) new_added_tokens = [AddedToken(new_token_2, lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0 ) self.assertIn(new_token_2, tokenizer.additional_special_tokens) self.assertEqual( [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])) ) @require_tokenizers def test_encode_decode_with_spaces(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input = "hello world" if self.space_between_special_tokens: output = "[CLS] hello world [SEP]" else: output = input encoded = tokenizer.encode(input, add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(decoded, [output, output.lower()]) def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] token_to_test_setters = "a" token_id_to_test_setters = ord(token_to_test_setters) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) additional_special_token_id = 0xE006 additional_special_token = chr(additional_special_token_id) setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]) def test_add_tokens_tokenizer(self): pass def test_added_tokens_do_lower_case(self): pass def test_np_encode_plus_sent_to_model(self): pass def test_torch_encode_plus_sent_to_model(self): pass def test_pretrained_model_lists(self): pass def test_get_vocab(self): pass def test_pretokenized_inputs(self): pass def test_conversion_reversible(self): pass
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ChineseCLIPImageProcessor if is_torch_available(): pass class ChineseCLIPImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): size = size if size is not None else {"height": 224, "width": 224} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return 3, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ChineseCLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 224, "width": 224}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) 
@unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass @require_torch @require_vision class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True) self.expected_encoded_image_num_channels = 3 @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet") def test_call_numpy(self): return super().test_call_numpy() @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet") def test_call_pytorch(self): return super().test_call_torch() @unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Chinese-CLIP model."""
# Notes from the original inline comments of this test file:
# - get_config returns a tiny configuration by default.
# - In ViT, the sequence length equals the number of patches plus 1 (we add 1
#   for the [CLS] token): num_patches = (image_size // patch_size) ** 2 and
#   seq_length = num_patches + 1.
# - _prepare_for_class special-cases *ForPreTraining models by adding labels.
# - test_model_as_decoder_with_default_input_mask is a regression test that was
#   failing with PyTorch 1.3.
# - Some tests of test_modeling_common.py are overwritten here, as Chinese-CLIP
#   does not use input_ids / inputs_embeds / attention_mask / seq_length.
# - signature.parameters is an OrderedDict, so arg_names order is deterministic.
# - test_initialization is overridden because the logit_scale parameter
#   initialization differs for Chinese-CLIP: it is checked against the original
#   implementation's value, np.log(1 / 0.07).
# - create_and_check_torchscript uses zero-init configs to be sure there are no
#   NaNs, and Chinese-CLIP needs pixel_values for tracing.
# - The integration test runs a forward pass and verifies the logits on an
#   image of Pikachu.
import inspect import os import tempfile import unittest import numpy as np import requests from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ChineseCLIPModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) from transformers.models.chinese_clip.modeling_chinese_clip import CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ChineseCLIPProcessor class ChineseCLIPTextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return ChineseCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ChineseCLIPTextModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ChineseCLIPTextModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict class ChineseCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = 
self.get_config()
        return config, pixel_values

    def get_config(self):
        return ChineseCLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = ChineseCLIPVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ChineseCLIPTextModel,) if is_torch_available() else ()
    fx_compatible = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = ChineseCLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ChineseCLIPTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ChineseCLIPTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass


@require_torch
class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ChineseCLIPVisionModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ChineseCLIPVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ChineseCLIPVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class ChineseCLIPModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        (
            config,
            input_ids,
            token_type_ids,
            attention_mask,
            _,
            __,
            ___,
        ) = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, token_type_ids, attention_mask, pixel_values

    def get_config(self):
        return ChineseCLIPConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values):
        model = ChineseCLIPModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask, token_type_ids)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ChineseCLIPModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ChineseCLIPModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        text_kwargs = {"use_labels": False, "batch_size": 12}
        vision_kwargs = {"batch_size": 12}
        self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ChineseCLIPModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    # override as the `logit_scale` parameter initialization is different for CHINESE_CLIP
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for sub_config_key in ("vision_config", "text_config"):
            sub_config = getattr(configs_no_init, sub_config_key, {})
            setattr(configs_no_init, sub_config_key, _config_zero_init(sub_config))
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # CHINESE_CLIP needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ChineseCLIPModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify our results on an image of Pokémon
def prepare_img():
    url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_torch
class ChineseCLIPModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPModel.from_pretrained(model_name).to(torch_device)
        processor = ChineseCLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        # Pokémon names in Chinese: Squirtle, Bulbasaur, Charmander, Pikachu
        inputs = processor(
            text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt"
        ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        probs = outputs.logits_per_image.softmax(dim=1)
        expected_probs = torch.tensor([[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]], device=torch_device)

        self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
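# Usage sketch (illustrative, not collected by the test runner): the integration test above
# boils down to the following zero-shot image-text matching recipe. It reuses the public
# OFA-Sys checkpoint, the image helper, and the captions from the test; network access is assumed.
def _demo_chinese_clip_zero_shot():
    model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
    model = ChineseCLIPModel.from_pretrained(model_name)
    processor = ChineseCLIPProcessor.from_pretrained(model_name)

    image = prepare_img()
    # Pokémon names in Chinese: Squirtle, Bulbasaur, Charmander, Pikachu
    texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]

    inputs = processor(text=texts, images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # One probability per caption; per the expected values in the test above,
    # "皮卡丘" (Pikachu) should receive ~94% of the mass.
    return outputs.logits_per_image.softmax(dim=1)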
import itertools
import random
import unittest

import numpy as np
from datasets import load_dataset

from transformers import ClapFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.trainer_utils import set_seed
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

global_rng = random.Random()


# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester with Whisper->Clap
class ClapFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class ClapFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ClapFeatureExtractor

    # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.setUp with Whisper->Clap
    def setUp(self):
        self.feat_extract_tester = ClapFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 4)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest._load_datasamples
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration_fusion_short_input(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                # "repeat"
                [
                    [-20.1049, -19.9764, -20.0731, -19.5055, -27.5018, -22.5761, -26.6071, -29.0091, -26.4659, -26.4236, -28.8808, -31.9190, -32.4848, -34.1186, -34.0340, -32.8803, -30.9895, -37.6238, -38.0347, -40.6263, -36.3496, -42.2533, -32.9132, -27.7068, -29.3704, -30.3208, -22.5972, -27.1494, -30.1975, -31.1005, -29.9372, -27.1917, -25.9806, -30.3489, -33.2380, -31.9062, -36.5498, -32.8721, -30.5629, -27.4674, -22.2232, -22.5653, -16.3868, -17.2713, -25.9738, -30.6256, -34.3766, -31.1292, -27.8950, -27.0588, -25.6206, -23.0712, -26.6050, -28.0112, -32.6847, -34.3396, -34.9738, -35.8463, -39.2324, -37.1188, -33.3705, -28.9230, -28.9112, -28.6578],
                    [-36.7233, -30.0587, -24.8431, -18.4611, -16.8149, -23.9319, -32.8580, -34.2264, -27.4332, -26.8027, -29.2721, -33.9033, -39.3403, -35.3232, -26.8076, -28.6460, -35.2780, -36.0738, -35.4996, -37.7631, -39.5056, -34.7112, -36.8741, -34.1066, -32.9474, -33.6604, -27.9937, -30.9594, -26.2928, -32.0485, -29.2151, -29.2917, -32.7308, -29.6542, -31.1454, -37.0088, -32.3388, -37.3086, -31.1024, -27.2889, -19.6788, -21.1488, -19.5144, -14.8889, -21.2006, -24.7488, -27.7940, -31.1058, -27.5068, -21.5737, -22.3780, -21.5151, -26.3086, -30.9223, -33.5043, -32.0307, -37.3806, -41.6188, -45.6650, -40.5131, -32.5023, -26.7385, -26.3709, -26.7761]
                ],
                # "repeatpad"
                [
                    [-25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112],
                    [-100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ [ -25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456, -33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575, -40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368, -36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010, -34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377, -38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952, -34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451, -34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136, -40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035, -31.6112 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ [ -58.5260, -58.1155, -57.8623, -57.5059, -57.9178, -58.7171, -59.2343, -59.9833, -60.9764, -62.0722, -63.5723, -65.7111, -67.5153, -68.7088, -69.8325, -70.2987, -70.1548, -70.6233, -71.5702, -72.5159, -72.3821, -70.1817, -67.0315, -64.1387, -62.2202, -61.0717, -60.4951, -61.6005, -63.7358, -67.1400, -67.6185, -65.5635, -64.3593, -63.7138, -63.6209, -66.4950, -72.6284, -63.3961, -56.8334, -52.7319, -50.6310, -51.3728, -53.5619, -51.9190, -50.9708, -52.8684, -55.8073, -58.8227, -60.6991, -57.0547, -52.7611, -51.4388, -54.4892, -60.8950, -66.1024, -72.4352, -67.8538, -65.1463, -68.7588, -72.3080, -68.4864, -60.4688, -57.1516, -60.9460 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
                    ]
                ]
            ]
        )
        # fmt: on
        MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
        input_speech = self._load_datasamples(1)
        feature_extractor = ClapFeatureExtractor()
        for padding, EXPECTED_VALUES, idx_in_mel in zip(
            ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN
        ):
            input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
            self.assertEqual(input_features.shape, (1, 4, 1001, 64))
            self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
            self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))
            self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 1]))
            self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 2]))
            self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 3]))

    def test_integration_rand_trunc_short_input(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                # "repeat"
                [
                    [-35.0483, -35.7865, -38.2884, -40.0220, -42.5349, -44.9489, -43.2228, -44.6499, -47.6253, -49.6983, -50.2127, -52.5483, -52.2223, -51.9157, -49.4082, -51.2024, -57.0476, -56.2803, -58.1618, -60.7474, -55.0389, -60.9514, -59.3080, -50.4419, -47.8172, -48.7570, -55.2552, -44.5036, -44.1148, -50.8218, -51.0968, -52.9408, -51.1037, -48.9789, -47.5897, -52.0915, -55.4216, -54.1529, -58.0149, -58.0866, -52.7798, -52.6154, -45.9144, -46.2008, -40.7603, -41.1703, -50.2250, -55.4112, -59.4818, -54.5795, -53.5552, -51.3668, -49.8358, -50.3186, -54.0452, -57.6030, -61.1589, -61.6415, -63.2756, -66.5890, -62.8543, -58.0665, -56.7203, -56.7632],
                    [-47.1320, -37.9961, -34.0076, -36.7109, -47.9057, -48.4924, -43.8371, -44.9728, -48.1689, -52.9141, -57.6077, -52.8520, -44.8502, -45.6764, -51.8389, -56.4284, -54.6972, -53.4889, -55.6077, -58.7149, -60.3760, -54.0136, -56.0730, -55.9870, -54.4017, -53.1094, -53.5640, -50.3064, -49.9520, -49.3239, -48.1668, -53.4852, -50.4561, -50.8688, -55.1970, -51.5538, -53.0260, -59.6933, -54.8183, -59.5895, -55.9589, -50.3761, -44.1282, -44.1463, -43.8540, -39.1168, -45.3893, -49.5542, -53.1505, -55.2870, -50.3921, -46.8511, -47.4444, -49.5633, -56.0034, -59.0815, -59.0018, -63.7589, -69.5745, -71.5789, -64.0498, -56.0558, -54.3475, -54.7004]
                ],
                # "repeatpad"
                [
                    [-40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, -54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139],
                    [-100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
                     -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
                     -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
                     -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.
] ], [ [ -40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297, -60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865, -54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941, -56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141, -50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247, -62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084, -49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358, -60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610, -66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972, -59.3139 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. ] ], [ [ -73.3190, -73.6349, -74.1451, -74.8539, -75.7476, -76.5438, -78.5540, -80.1339, -81.8911, -83.7560, -85.5387, -86.7466, -88.2072, -88.6090, -88.8243, -89.0784, -89.4364, -89.8179, -91.3146, -92.2833, -91.7221, -90.9440, -88.1315, -86.2425, -84.2281, -82.4893, -81.5993, -81.1328, -81.5759, -83.1068, -85.6525, -88.9520, -88.9187, -87.2703, -86.3052, -85.7188, -85.8802, -87.9996, -95.0464, -88.0133, -80.8561, -76.5597, -74.2816, -74.8109, -77.3615, -76.0719, -75.3426, -77.6428, -80.9663, -84.5275, -84.9907, -80.5205, -77.2851, -78.6259, -84.7740, -91.4535, -98.1894, -94.3872, -92.3735, -97.6807, -98.1501, -91.4344, -85.2842, -88.4338 ], [ -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100. 
                    ]
                ]
            ]
        )
        # fmt: on
        MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
        input_speech = self._load_datasamples(1)
        feature_extractor = ClapFeatureExtractor()
        for padding, EXPECTED_VALUES, idx_in_mel in zip(
            ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN
        ):
            input_features = feature_extractor(
                input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
            ).input_features
            self.assertEqual(input_features.shape, (1, 1, 1001, 64))
            self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
            self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))

    def test_integration_fusion_long_input(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                [-11.1830, -10.1894, -8.6051, -4.8578, -1.3268, -8.4606, -14.5453, -9.2017, 0.5781, 16.2129, 14.8289, 3.6326, -3.8794, -6.5544, -2.4408, 1.9531, 6.0967, 1.7590, -7.6730, -6.1571, 2.0052, 16.6694, 20.6447, 21.2145, 13.4972, 15.9043, 16.8987, 4.1766, 11.9428, 21.2372, 12.3016, 4.8604, 6.7241, 1.8543, 4.9235, 5.3188, -0.9897, -1.2416, -6.5864, 2.9529, 2.9274, 6.4753, 10.2300, 11.2127, 3.4042, -1.0055, -6.0475, -6.7524, -3.9801, -1.4434, 0.4740, -0.1584, -4.5457, -8.5746, -8.8428, -13.1475, -9.6079, -8.5798, -4.1143, -3.7966, -7.1651, -6.1517, -8.0258, -12.1486],
                [-10.2017, -7.9924, -5.9517, -3.9372, -1.9735, -4.3130, 16.1647, 25.0592, 23.5532, 14.4974, -7.0778, -10.2262, 6.4782, 20.3454, 19.4269, 1.7976, -16.5070, 4.9380, 12.3390, 6.9285, -13.6325, -8.5298, 1.0839, -5.9629, -8.4812, 3.1331, -2.0963, -16.6046, -14.0070, -17.5707, -13.2080, -17.2168, -17.7770, -12.1111, -18.6184, -17.1897, -13.9801, -12.0426, -23.5400, -25.6823, -23.5813, -18.7847, -20.5473, -25.6458, -19.7585, -27.6007, -28.9276, -24.8948, -25.4458, -22.2807, -19.6613, -19.2669, -15.7813, -19.6821, -24.3439, -22.2598, -28.2631, -30.1017, -32.7646, -33.6525, -27.5639, -22.0548, -27.8054, -29.6947],
                [-9.2078, -7.2963, -6.2095, -7.9959, -2.9280, -11.1843, -6.1490, 5.0733, 19.2957, 21.4578, 14.6803, -3.3153, -6.3334, -2.3542, 6.9509, 15.2965, 14.6620, 5.2075, -0.0873, 1.1919, 18.1986, 20.8470, 10.8035, 2.2516, 7.6905, 7.7427, -1.2543, -5.0018, 0.9809, -2.1584, -5.4580, -5.4760, -11.8888, -9.0605, -8.4638, -9.9897, -0.0540, -5.1629, 0.0483, -4.1504, -4.8140, -7.8236, -9.0622, -10.1742, -8.9597, -11.5380, -16.5603, -17.1858, -17.5032, -20.9326, -23.9543, -25.2602, -25.3429, -27.4536, -26.8859, -22.7852, -25.8288, -24.8399, -23.8893, -24.2096, -26.5415, -23.7281, -25.6851, -22.3629],
                [1.3448, 2.9883, 4.0366, -0.8019, -10.4191, -10.0883, -4.3812, 0.8136, 2.1579, 0.0832, 1.0949, -0.9759, -5.5319, -4.6009, -6.5452, -14.9155, -20.1584, -9.3611, -2.4271, 1.4031, 4.9910, 8.6916, 8.6785, 10.1973, 9.9029, 5.3840, 7.5336, 5.2803, 2.8144, -0.3138, 2.2216, 5.7328, 7.5574, 7.7402, 1.0681, 3.1049, 7.0742, 6.5588, 7.3712, 5.7881, 8.6874, 8.7725, 2.8133, -4.5809, -6.1317, -5.1719, -5.0192, -9.0977, -10.9391, -6.0769, 1.6016, -0.8965, -7.2252, -7.8632, -11.4468, -11.7446, -10.7447, -7.0601, -2.7748, -4.1798, -2.8433, -3.1352, 0.8097, 6.4212]
            ]
        )
        # fmt: on
        MEL_BIN = 963
        input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
        feature_extractor = ClapFeatureExtractor()
        for padding, EXPECTED_VALUES, block_idx in zip(
            ["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, [1, 2, 0, 3]
        ):
            set_seed(987654321)
            input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
            self.assertEqual(input_features.shape, (1, 4, 1001, 64))
            self.assertTrue(torch.allclose(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, atol=1e-3))

    def test_integration_rand_trunc_long_input(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                [-35.4022, -32.7555, -31.2004, -32.7764, -42.5770, -41.6339, -43.1630, -44.5080, -44.3029, -48.9628, -39.5022, -39.2105, -43.1350, -43.2195, -48.4894, -52.2344, -57.6891, -52.2228, -45.5155, -44.2893, -43.4697, -46.6702, -43.7490, -40.4819, -42.7275, -46.3434, -46.8412, -41.2003, -43.1681, -46.2948, -46.1925, -47.8333, -45.6812, -44.9182, -41.7786, -43.3809, -44.3199, -42.8814, -45.4771, -46.7114, -46.9746, -42.7090, -41.6057, -38.3965, -40.1980, -41.0263, -34.1256, -28.3289, -29.0201, -30.4453, -29.5561, -30.1734, -25.9406, -19.0897, -15.8452, -20.1351, -23.6515, -23.1194, -17.1845, -19.4399, -23.6527, -22.8768, -20.7279, -22.7864],
                [-35.7719, -27.2566, -23.6964, -27.5521, 0.2510, 7.4391, 1.3917, -13.3417, -28.1758, -17.0856, -5.7723, -0.8000, -7.8832, -15.5548, -30.5935, -24.7571, -13.7009, -10.3432, -21.2464, -24.8118, -19.4080, -14.9779, -11.7991, -18.4485, -20.1982, -17.3652, -20.6328, -28.2967, -25.7819, -21.8962, -28.5083, -29.5719, -30.2120, -35.7033, -31.8218, -34.0408, -37.7744, -33.9653, -31.3009, -30.9063, -28.6153, -32.2202, -28.5456, -28.8579, -32.5170, -37.9152, -43.0052, -46.4849, -44.0786, -39.1933, -33.2757, -31.6313, -42.6386, -52.3679, -53.5785, -55.6444, -47.0050, -47.6459, -56.6361, -60.6781, -61.5244, -55.8272, -60.4832, -58.1897],
                [-38.2686, -36.6285, -32.5835, -35.1693, -37.7938, -37.4035, -35.3132, -35.6083, -36.3609, -40.9472, -36.7846, -36.1544, -38.9076, -39.3618, -35.4953, -34.2809, -39.9466, -39.7433, -34.8347, -37.5674, -41.5689, -38.9161, -34.3947, -30.2924, -30.4841, -34.5831, -28.9261, -24.8849, -31.2324, -27.1622, -27.2107, -25.9385, -30.1691, -30.9223, -23.9495, -25.6047, -26.7119, -28.5523, -27.7481, -32.8427, -35.4650, -31.0399, -31.2073, -30.5163, -22.9819, -20.8892, -19.2510, -24.7905, -28.9426, -28.1998, -26.7386, -25.0140, -27.9223, -32.9913, -33.1864, -34.9742, -38.5995, -39.6990, -29.3203, -22.4697, -25.6415, -33.5608, -33.0945, -27.1716],
                [-33.2015, -28.7741, -21.9457, -23.4888, -32.1072, -8.6307, 3.2724, 5.9157, -0.9221, -30.1814, -31.0015, -27.4508, -27.0477, -9.5342, 0.3221, 0.6511, -7.1596, -25.9707, -32.8924, -32.2300, -13.8974, -0.4895, 0.9168, -10.7663, -27.1176, -35.0829, -11.6859, -4.8855, -11.8898, -26.6167, -5.6192, -3.8443, -19.7947, -14.4101, -8.6236, -21.2458, -21.0801, -17.9136, -24.4663, -18.6333, -24.8085, -15.5854, -15.4344, -11.5046, -22.3625, -27.3387, -32.4353, -30.9670, -31.3789, -35.4044, -34.4591, -25.2433, -28.0773, -33.8736, -33.0224, -33.3155, -38.5302, -39.2741, -36.6395, -34.7729, -32.4483, -42.4001, -49.2857, -39.1682]
            ]
        )
        # fmt: on
        MEL_BIN = 963
        SEEDS = [987654321, 1234, 666, 5555]
        input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
        feature_extractor = ClapFeatureExtractor()
        for padding, EXPECTED_VALUES, seed in zip(["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, SEEDS):
            set_seed(seed)
            input_features = feature_extractor(
                input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
            ).input_features
            self.assertEqual(input_features.shape, (1, 1, 1001, 64))
            self.assertTrue(torch.allclose(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, atol=1e-4))
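# Usage sketch (illustrative, not collected by the test runner): per the integration tests
# above, ClapFeatureExtractor maps a raw waveform to log-mel input features of shape
# (batch, 4, 1001, 64) in the default "fusion" mode and (batch, 1, 1001, 64) with
# truncation="rand_trunc". The silent 10-second, 48 kHz waveform below is an assumption for
# illustration; real audio (e.g. the librispeech samples loaded above) works the same way.
def _demo_clap_feature_extractor():
    feature_extractor = ClapFeatureExtractor()
    waveform = np.zeros(48_000 * 10, dtype=np.float32)  # hypothetical 10 s of silence

    fused = feature_extractor(waveform, return_tensors="pt", padding="repeatpad").input_features
    cropped = feature_extractor(
        waveform, return_tensors="pt", truncation="rand_trunc", padding="repeatpad"
    ).input_features
    return fused.shape, cropped.shape  # torch.Size([1, 4, 1001, 64]), torch.Size([1, 1, 1001, 64])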
"""Testing suite for the PyTorch CLAP model."""

import inspect
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        ClapAudioModel,
        ClapAudioModelWithProjection,
        ClapModel,
        ClapTextModel,
        ClapTextModelWithProjection,
    )
    from transformers.models.clap.modeling_clap import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST


class ClapAudioModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=60,
        num_mel_bins=16,
        window_size=4,
        spec_size=64,
        patch_size=2,
        patch_stride=2,
        seq_length=16,
        freq_ratio=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        patch_embeds_hidden_size=16,
        projection_dim=32,
        depths=[2, 2],
        num_hidden_layers=2,
        num_heads=[2, 2],
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_mel_bins = num_mel_bins
        self.window_size = window_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.depths = depths
        self.num_heads = num_heads
        self.num_attention_heads = num_heads[0]
        self.seq_length = seq_length
        self.spec_size = spec_size
        self.freq_ratio = freq_ratio
        self.patch_stride = patch_stride
        self.patch_embeds_hidden_size = patch_embeds_hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_features = floats_tensor([self.batch_size, 1, self.hidden_size, self.num_mel_bins])
        config = self.get_config()

        return config, input_features

    def get_config(self):
        return ClapAudioConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_mel_bins=self.num_mel_bins,
            window_size=self.window_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            patch_stride=self.patch_stride,
            projection_dim=self.projection_dim,
            depths=self.depths,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
            spec_size=self.spec_size,
            freq_ratio=self.freq_ratio,
            patch_embeds_hidden_size=self.patch_embeds_hidden_size,
        )

    def create_and_check_model(self, config, input_features):
        model = ClapAudioModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_features)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_features):
        model = ClapAudioModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_features)
        self.parent.assertEqual(result.audio_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_features = config_and_inputs
        inputs_dict = {"input_features": input_features}
        return config, inputs_dict


@require_torch
class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClapAudioModel, ClapAudioModelWithProjection) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ClapAudioModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ClapAudioModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [2 * self.model_tester.patch_embeds_hidden_size, 2 * self.model_tester.patch_embeds_hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_features"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_training(self):
        pass

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapAudioModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapAudioModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "audio_projection"))


class ClapTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
        projection_hidden_act="relu",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.projection_hidden_act = projection_hidden_act

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return ClapTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            projection_hidden_act=self.projection_hidden_act,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = ClapTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_ids, input_mask):
        model = ClapTextModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class ClapTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClapTextModel, ClapTextModelWithProjection) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ClapTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
    def test_training(self):
        pass

    @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ClapTextModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapTextModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "text_projection"))


class ClapModelTester:
    def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if audio_kwargs is None:
            audio_kwargs = {}

        self.parent = parent
        self.text_model_tester = ClapTextModelTester(parent, **text_kwargs)
        self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        _, input_features = self.audio_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, input_features

    def get_config(self):
        return ClapConfig.from_text_audio_configs(
            self.text_model_tester.get_config(), self.audio_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, input_features):
        model = ClapModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, input_features, attention_mask)
        self.parent.assertEqual(
            result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, input_features = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "input_features": input_features,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ClapModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ClapModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = ClapModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ClapModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    # override as the `logit_scale` parameter initialization is different for CLAP
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                input_features = inputs_dict["input_features"]  # CLAP needs input_features
                traced_model = torch.jit.trace(model, (input_ids, input_features))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_audio_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save ClapConfig and check if we can load ClapAudioConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            audio_config = ClapAudioConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.audio_config.to_dict(), audio_config.to_dict())

        # Save ClapConfig and check if we can load ClapTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = ClapTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
todict slow def testmodelfrompretrainedself for modelname in clappretrainedmodelarchivelist 1 model clapmodel frompretrainedmodelname self assertisnotnonemodel slow requiretorch class clapmodelintegrationtestunittest testcase paddings repeatpad repeat pad def testintegrationunfusedself expectedmeansunfused repeatpad 0 0024 pad 0 0020 repeat 0 0023 librispeechdummy loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation audiosample librispeechdummy1 modelid laionclaphtsatunfused model clapmodel frompretrainedmodelid totorchdevice processor clapprocessor frompretrainedmodelid for padding in self paddings inputs processoraudiosaudiosampleaudioarray returntensorspt paddingpadding to torchdevice audioembed model getaudiofeaturesinputs expectedmean expectedmeansunfusedpadding self asserttrue torch allcloseaudioembed cpu mean torch tensorexpectedmean atol1e3 rtol1e3 def testintegrationfusedself expectedmeansfused repeatpad 0 00069 repeat 0 00196 pad 0 000379 librispeechdummy loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation audiosample librispeechdummy1 modelid laionclaphtsatfused model clapmodel frompretrainedmodelid totorchdevice processor clapprocessor frompretrainedmodelid for padding in self paddings inputs processor audiosaudiosampleaudioarray returntensorspt paddingpadding truncationfusion totorchdevice audioembed model getaudiofeaturesinputs expectedmean expectedmeansfusedpadding self asserttrue torch allcloseaudioembed cpu mean torch tensorexpectedmean atol1e3 rtol1e3 def testbatchedfusedself expectedmeansfused repeatpad 0 0010 repeat 0 0020 pad 0 0006 librispeechdummy loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation audiosamples samplearray for sample in librispeechdummy0 4audio modelid laionclaphtsatfused model clapmodel frompretrainedmodelid totorchdevice processor clapprocessor frompretrainedmodelid for padding in self paddings inputs processoraudiosaudiosamples returntensorspt paddingpadding truncationfusion to torchdevice audioembed model getaudiofeaturesinputs expectedmean expectedmeansfusedpadding self asserttrue torch allcloseaudioembed cpu mean torch tensorexpectedmean atol1e3 rtol1e3 def testbatchedunfusedself expectedmeansfused repeatpad 0 0016 repeat 0 0019 pad 0 0019 librispeechdummy loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation audiosamples samplearray for sample in librispeechdummy0 4audio modelid laionclaphtsatunfused model clapmodel frompretrainedmodelid totorchdevice processor clapprocessor frompretrainedmodelid for padding in self paddings inputs processoraudiosaudiosamples returntensorspt paddingpadding totorchdevice audioembed model getaudiofeaturesinputs expectedmean expectedmeansfusedpadding self asserttrue torch allcloseaudioembed cpu mean torch tensorexpectedmean atol1e3 rtol1e3 coding utf 8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch clap model here we also overwrite some of the tests of test_modeling_common py as clap does not use input_ids inputs_embeds attention_mask and 
seq_length check that output_hidden_states also work using config signature parameters is an ordereddict so arg_names order is deterministic override as the logit_scale parameter initilization is different for clap check if logit_scale is initilized as per the original implementation to be sure we have no nan clap needs input_features save clapconfig and check if we can load clapaudioconfig from it save clapconfig and check if we can load claptextconfig from it
import inspect
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        ClapAudioModel,
        ClapAudioModelWithProjection,
        ClapModel,
        ClapTextModel,
        ClapTextModelWithProjection,
    )
    from transformers.models.clap.modeling_clap import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST


class ClapAudioModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=60,
        num_mel_bins=16,
        window_size=4,
        spec_size=64,
        patch_size=2,
        patch_stride=2,
        seq_length=16,
        freq_ratio=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        patch_embeds_hidden_size=16,
        projection_dim=32,
        depths=[2, 2],
        num_hidden_layers=2,
        num_heads=[2, 2],
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_mel_bins = num_mel_bins
        self.window_size = window_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.depths = depths
        self.num_heads = num_heads
        self.num_attention_heads = num_heads[0]
        self.seq_length = seq_length
        self.spec_size = spec_size
        self.freq_ratio = freq_ratio
        self.patch_stride = patch_stride
        self.patch_embeds_hidden_size = patch_embeds_hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_features = floats_tensor([self.batch_size, 1, self.hidden_size, self.num_mel_bins])
        config = self.get_config()

        return config, input_features

    def get_config(self):
        return ClapAudioConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_mel_bins=self.num_mel_bins,
            window_size=self.window_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            patch_stride=self.patch_stride,
            projection_dim=self.projection_dim,
            depths=self.depths,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
            spec_size=self.spec_size,
            freq_ratio=self.freq_ratio,
            patch_embeds_hidden_size=self.patch_embeds_hidden_size,
        )

    def create_and_check_model(self, config, input_features):
        model = ClapAudioModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_features)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_features):
        model = ClapAudioModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_features)
        self.parent.assertEqual(result.audio_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_features = config_and_inputs
        inputs_dict = {"input_features": input_features}
        return config, inputs_dict


@require_torch
class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CLAP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ClapAudioModel, ClapAudioModelWithProjection) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ClapAudioModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ClapAudioModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [2 * self.model_tester.patch_embeds_hidden_size, 2 * self.model_tester.patch_embeds_hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_features"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_training(self):
        pass

    @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapAudioModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapAudioModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "audio_projection"))


class ClapTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
        projection_hidden_act="relu",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.projection_hidden_act = projection_hidden_act

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return ClapTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            projection_hidden_act=self.projection_hidden_act,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = ClapTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_ids, input_mask):
        model = ClapTextModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class ClapTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClapTextModel, ClapTextModelWithProjection) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ClapTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
    def test_training(self):
        pass

    @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ClapTextModel does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapTextModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "text_projection"))


class ClapModelTester:
    def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if audio_kwargs is None:
            audio_kwargs = {}

        self.parent = parent
        self.text_model_tester = ClapTextModelTester(parent, **text_kwargs)
        self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        _, input_features = self.audio_model_tester.prepare_config_and_inputs()
        config = self.get_config()

        return config, input_ids, attention_mask, input_features

    def get_config(self):
        return ClapConfig.from_text_audio_configs(
            self.text_model_tester.get_config(), self.audio_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, input_features):
        model = ClapModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, input_features, attention_mask)
        self.parent.assertEqual(
            result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, input_features = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "input_features": input_features,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ClapModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ClapModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = ClapModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ClapModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    # override as the `logit_scale` parameter initialization is different for CLAP
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                input_features = inputs_dict["input_features"]  # CLAP needs input_features
                traced_model = torch.jit.trace(model, (input_ids, input_features))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

                model.to(torch_device)
                model.eval()

                loaded_model.to(torch_device)
                loaded_model.eval()

                model_state_dict = model.state_dict()
                loaded_model_state_dict = loaded_model.state_dict()

                non_persistent_buffers = {}
                for key in loaded_model_state_dict.keys():
                    if key not in model_state_dict.keys():
                        non_persistent_buffers[key] = loaded_model_state_dict[key]

                loaded_model_state_dict = {
                    key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
                }

                self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

                model_buffers = list(model.buffers())
                for non_persistent_buffer in non_persistent_buffers.values():
                    found_buffer = False
                    for i, model_buffer in enumerate(model_buffers):
                        if torch.equal(non_persistent_buffer, model_buffer):
                            found_buffer = True
                            break

                    self.assertTrue(found_buffer)
                    model_buffers.pop(i)

                models_equal = True
                for layer_name, p1 in model_state_dict.items():
                    p2 = loaded_model_state_dict[layer_name]
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

                self.assertTrue(models_equal)

    def test_load_audio_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save ClapConfig and check if we can load ClapAudioConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            audio_config = ClapAudioConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.audio_config.to_dict(), audio_config.to_dict())

        # Save ClapConfig and check if we can load ClapTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = ClapTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClapModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@slow
@require_torch
class ClapModelIntegrationTest(unittest.TestCase):
    paddings = ["repeatpad", "repeat", "pad"]

    def test_integration_unfused(self):
        EXPECTED_MEANS_UNFUSED = {
            "repeatpad": 0.0024,
            "pad": 0.0020,
            "repeat": 0.0023,
        }

        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio_sample = librispeech_dummy[-1]

        model_id = "laion/clap-htsat-unfused"

        model = ClapModel.from_pretrained(model_id).to(torch_device)
        processor = ClapProcessor.from_pretrained(model_id)

        for padding in self.paddings:
            inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding).to(
                torch_device
            )

            audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_UNFUSED[padding]

            self.assertTrue(
                torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
            )

    def test_integration_fused(self):
        EXPECTED_MEANS_FUSED = {
            "repeatpad": 0.00069,
            "repeat": 0.00196,
            "pad": -0.000379,
        }

        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio_sample = librispeech_dummy[-1]

        model_id = "laion/clap-htsat-fused"

        model = ClapModel.from_pretrained(model_id).to(torch_device)
        processor = ClapProcessor.from_pretrained(model_id)

        for padding in self.paddings:
            inputs = processor(
                audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding, truncation="fusion"
            ).to(torch_device)

            audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_FUSED[padding]

            self.assertTrue(
                torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
            )

    def test_batched_fused(self):
        EXPECTED_MEANS_FUSED = {
            "repeatpad": 0.0010,
            "repeat": 0.0020,
            "pad": 0.0006,
        }

        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]

        model_id = "laion/clap-htsat-fused"

        model = ClapModel.from_pretrained(model_id).to(torch_device)
        processor = ClapProcessor.from_pretrained(model_id)

        for padding in self.paddings:
            inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding, truncation="fusion").to(
                torch_device
            )

            audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_FUSED[padding]

            self.assertTrue(
                torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
            )

    def test_batched_unfused(self):
        EXPECTED_MEANS_UNFUSED = {
            "repeatpad": 0.0016,
            "repeat": 0.0019,
            "pad": 0.0019,
        }

        librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]

        model_id = "laion/clap-htsat-unfused"

        model = ClapModel.from_pretrained(model_id).to(torch_device)
        processor = ClapProcessor.from_pretrained(model_id)

        for padding in self.paddings:
            inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding).to(torch_device)

            audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_UNFUSED[padding]

            self.assertTrue(
                torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
            )
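# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test suite): the usage pattern that
# ClapModelIntegrationTest exercises, written out as a standalone snippet for
# zero-shot audio/text matching. The checkpoint id, the dummy dataset, and the
# `logits_per_audio` output name are taken from the tests above; the candidate
# captions are hypothetical.
# ---------------------------------------------------------------------------

import torch
from datasets import load_dataset

from transformers import ClapModel, ClapProcessor

model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_array = librispeech_dummy[-1]["audio"]["array"]

captions = ["a person reading aloud", "a dog barking"]  # hypothetical labels
inputs = processor(text=captions, audios=audio_array, return_tensors="pt", padding=True)

with torch.no_grad():
    outputs = model(**inputs)

# One row per audio clip, one column per caption; softmax turns the
# similarity logits into match probabilities.
probs = outputs.logits_per_audio.softmax(dim=-1)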
# Copyright 2023 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
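# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test suite): what the processor tests above
# establish, in one snippet. ClapProcessor routes `text` to the Roberta
# tokenizer and `audios` to ClapFeatureExtractor, so both modalities are
# prepared through one object. The one-second silent waveform and the 48 kHz
# rate are illustrative assumptions; the exact output keys are checkpoint-
# dependent, hence the print.
# ---------------------------------------------------------------------------

import numpy as np

from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

text_batch = processor(text="This is a test string")  # tokenizer output: input_ids, attention_mask, ...
audio_batch = processor(audios=np.zeros(48000, dtype=np.float32), return_tensors="np")  # feature extractor output
print(list(text_batch.keys()), list(audio_batch.keys()))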
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from transformers import CLIPImageProcessor


class CLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class CLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = CLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = CLIPImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
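# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test suite): the preprocessing pipeline the
# tester above configures, run end to end. With size={"shortest_edge": 20} and
# an 18x18 center crop, any input image should come out as a (3, 18, 18)
# tensor, matching `expected_output_image_shape`. The random image is a
# stand-in for real inputs.
# ---------------------------------------------------------------------------

import numpy as np
from PIL import Image

from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}, do_convert_rgb=True
)
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])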
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CLIP model. """
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CLIP model. """
import inspect
import os
import tempfile
import unittest

import numpy as np
import requests

import transformers
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
from transformers.testing_utils import (
    is_flax_available,
    is_pt_flax_cross_test,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        CLIPModel,
        CLIPTextModel,
        CLIPTextModelWithProjection,
        CLIPVisionModel,
        CLIPVisionModelWithProjection,
    )
    from transformers.models.clip.modeling_clip import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import CLIPProcessor


if is_flax_available():
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )


class CLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return CLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = CLIPVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, pixel_values):
        model = CLIPVisionModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.image_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (CLIPVisionModel, CLIPVisionModelWithProjection) if is_torch_available() else ()
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = CLIPVisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="CLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="CLIPVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPVisionModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "visual_projection"))


class CLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return CLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = CLIPTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_with_projection(self, config, input_ids, input_mask):
        model = CLIPTextModelWithProjection(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class CLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (CLIPTextModel, CLIPTextModelWithProjection) if is_torch_available() else ()
    fx_compatible = True
    test_pruning = False
    test_head_masking = False
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        self.model_tester = CLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_projection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="CLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="CLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_model_with_projection_from_pretrained(self):
        for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPTextModelWithProjection.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertTrue(hasattr(model, "text_projection"))


class CLIPModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = CLIPTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = CLIPVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return CLIPConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = CLIPModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CLIPModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": CLIPModel} if is_torch_available() else {}
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = CLIPModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="CLIPModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    # override as the logit_scale parameter initialization is different for CLIP
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if logit_scale is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # to be sure we have no NaN
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # CLIP needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save CLIPConfig and check if we can load CLIPVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = CLIPVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save CLIPConfig and check if we can load CLIPTextConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            text_config = CLIPTextConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # load PyTorch class
                pt_model = model_class(config).eval()
                # Flax models don't use the `use_cache` option and cache is not returned as a default,
                # so we disable `use_cache` here for the PyTorch model.
                pt_model.config.use_cache = False

                fx_model_class_name = "Flax" + model_class.__name__

                if not hasattr(transformers, fx_model_class_name):
                    return

                fx_model_class = getattr(transformers, fx_model_class_name)

                # load Flax class
                fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure only flax inputs are forwarded that actually exist in function args
                fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()

                # prepare inputs
                pt_inputs = self._prepare_for_class(inputs_dict, model_class)

                # remove function args that don't exist in Flax
                pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                # convert inputs to Flax
                fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)}
                fx_outputs = fx_model(**fx_inputs).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # load corresponding PyTorch class
                pt_model = model_class(config).eval()
                # so we disable `use_cache` here for the PyTorch model
                pt_model.config.use_cache = False

                fx_model_class_name = "Flax" + model_class.__name__

                if not hasattr(transformers, fx_model_class_name):
                    # no flax model exists for this class
                    return

                fx_model_class = getattr(transformers, fx_model_class_name)

                # load Flax class
                fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure only flax inputs are forwarded that actually exist in function args
                fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                # prepare inputs
                pt_inputs = self._prepare_for_class(inputs_dict, model_class)

                # remove function args that don't exist in Flax
                pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)}

                fx_outputs = fx_model(**fx_inputs).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_torch
class CLIPModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "openai/clip-vit-base-patch32"
        model = CLIPModel.from_pretrained(model_name).to(torch_device)
        processor = CLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
        ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        expected_logits = torch.tensor([[24.5701, 19.3049]], device=torch_device)

        self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
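The integration test above pins exact logit values; for orientation, here is a minimal sketch of the zero-shot classification pattern it exercises. The checkpoint, image URL, and prompts are the ones used in the test; the final softmax over the text axis is a conventional downstream step, not something the test itself asserts.

import requests
import torch
from PIL import Image

from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)

with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image has shape (num_images, num_texts); softmax over the text
# axis turns the similarity logits into per-image label probabilities.
probs = outputs.logits_per_image.softmax(dim=1)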
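One numeric detail from test_initialization above is worth spelling out: CLIP stores its softmax temperature in log space, so a freshly initialized logit_scale should equal log(1 / 0.07). A quick self-contained check (the numbers below are computed here, not taken from the test):

import numpy as np

logit_scale_init = np.log(1 / 0.07)  # the value test_initialization expects
assert abs(logit_scale_init - 2.6593) < 1e-4
# exp(logit_scale) ~= 14.29 is the factor that scales the cosine similarities
assert abs(np.exp(logit_scale_init) - 1 / 0.07) < 1e-9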
import inspect
import tempfile
import unittest

import numpy as np

import transformers
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.clip.modeling_flax_clip import (
        FlaxCLIPModel,
        FlaxCLIPTextModel,
        FlaxCLIPTextModelWithProjection,
        FlaxCLIPVisionModel,
    )


if is_torch_available():
    import torch


class FlaxCLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = CLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxCLIPVisionModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (FlaxCLIPVisionModel,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxCLIPVisionModelTester(self)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs).to_tuple()

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict)

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict)

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)

            # CLIP has a different seq_length
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
            num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
            seq_length = num_patches + 1

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_length = num_patches + 1

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

    # FlaxCLIPVisionModel does not have any base model
    def test_save_load_from_base(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    def test_save_load_to_base(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_from_base_pt(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_to_base_pt(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_bf16_to_base_pt(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


class FlaxCLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = CLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_flax
class FlaxCLIPTextModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxCLIPTextModelTester(self)

    # FlaxCLIPTextModel does not have any base model
    def test_save_load_from_base(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    def test_save_load_to_base(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_from_base_pt(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_to_base_pt(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    @is_pt_flax_cross_test
    def test_save_load_bf16_to_base_pt(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


class FlaxCLIPModelTester:
    def __init__(self, parent, is_training=True):
        self.parent = parent
        self.text_model_tester = FlaxCLIPTextModelTester(parent)
        self.vision_model_tester = FlaxCLIPVisionModelTester(parent)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64)

        return config, input_ids, attention_mask, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


@require_flax
class FlaxCLIPModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxCLIPModel,) if is_flax_available() else ()
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = FlaxCLIPModelTester(self)

    # hidden_states are tested in individual model tests
    def test_hidden_states_output(self):
        pass

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, pixel_values, **kwargs):
                    return model(input_ids=input_ids, pixel_values=pixel_values, **kwargs).to_tuple()

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict)

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict)

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs[:4], outputs[:4]):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_ids", "pixel_values", "attention_mask", "position_ids"]
            self.assertListEqual(arg_names[:4], expected_arg_names)

    def test_get_image_features(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = FlaxCLIPModel(config)

        @jax.jit
        def model_jitted(pixel_values):
            return model.get_image_features(pixel_values=pixel_values)

        with self.subTest("JIT Enabled"):
            jitted_output = model_jitted(inputs_dict["pixel_values"])

        with self.subTest("JIT Disabled"):
            with jax.disable_jit():
                output = model_jitted(inputs_dict["pixel_values"])

        self.assertEqual(jitted_output.shape, output.shape)
        self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))

    def test_get_text_features(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = FlaxCLIPModel(config)

        @jax.jit
        def model_jitted(input_ids, attention_mask, **kwargs):
            return model.get_text_features(input_ids=input_ids, attention_mask=attention_mask)

        with self.subTest("JIT Enabled"):
            jitted_output = model_jitted(**inputs_dict)

        with self.subTest("JIT Disabled"):
            with jax.disable_jit():
                output = model_jitted(**inputs_dict)

        self.assertEqual(jitted_output.shape, output.shape)
        self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(input_ids=np.ones((1, 1)), pixel_values=np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    def test_from_pretrained_save_pretrained(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class.__name__ != "FlaxBertModel":
                continue

            with self.subTest(model_class.__name__):
                model = model_class(config)

                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                outputs = model(**prepared_inputs_dict).to_tuple()

                # verify that normal save_pretrained works as expected
                with tempfile.TemporaryDirectory() as tmpdirname:
                    model.save_pretrained(tmpdirname)
                    model_loaded = model_class.from_pretrained(tmpdirname)

                outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4]
                for output_loaded, output in zip(outputs_loaded, outputs):
                    self.assert_almost_equals(output_loaded, output, 1e-3)

                # verify that save_pretrained for distributed training
                # with `params=params` works as expected
                with tempfile.TemporaryDirectory() as tmpdirname:
                    model.save_pretrained(tmpdirname, params=model.params)
                    model_loaded = model_class.from_pretrained(tmpdirname)

                outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4]
                for output_loaded, output in zip(outputs_loaded, outputs):
                    self.assert_almost_equals(output_loaded, output, 1e-3)
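Condensed from the test_equivalence_pt_to_flax flow above, here is a standalone sketch of the PT-to-Flax round trip: convert a PyTorch state dict into Flax params, run both models on the same inputs, and compare one output head. The input shapes and the untrained default config are illustrative choices, not taken from the common test.

import jax.numpy as jnp
import numpy as np
import torch

from transformers import CLIPConfig, CLIPModel, FlaxCLIPModel
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

config = CLIPConfig()  # default, randomly initialized config keeps the example light
pt_model = CLIPModel(config).eval()
fx_model = FlaxCLIPModel(config, dtype=jnp.float32)
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

image_size = config.vision_config.image_size
input_ids = np.ones((1, 7), dtype=np.int64)
pixel_values = np.ones((1, 3, image_size, image_size), dtype=np.float32)

with torch.no_grad():
    pt_logits = pt_model(
        input_ids=torch.tensor(input_ids), pixel_values=torch.tensor(pixel_values)
    ).logits_per_image.numpy()
fx_logits = fx_model(input_ids=input_ids, pixel_values=pixel_values).logits_per_image

# same 4e-2 order of tolerance the equivalence tests use for PT/Flax drift
np.testing.assert_allclose(np.asarray(fx_logits), pt_logits, atol=4e-2)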
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow CLIP model. """
from __future__ import annotations

import inspect
import os
import tempfile
import unittest
from importlib import import_module

import requests

from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCLIPModel, TFCLIPTextModel, TFCLIPVisionModel, TFSharedEmbeddings
    from transformers.models.clip.modeling_tf_clip import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import CLIPProcessor


class TFCLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return CLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TFCLIPVisionModel(config=config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFCLIPVisionModel,) if is_tf_available() else ()
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCLIPVisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # CLIP does not use inputs_embeds
        pass

    def test_graph_mode_with_inputs_embeds(self):
        # CLIP does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # CLIP has a different seq_length
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
            num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
            seq_length = num_patches + 1

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCLIPVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]

                # Check num outputs
                self.assertEqual(len(outputs), num_out)

                # Check num layers
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)

                # Check attention outputs
                image_size = (self.model_tester.image_size, self.model_tester.image_size)
                patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
                num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
                seq_len = num_patches + 1

                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

                # Check hidden states
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [seq_len, self.model_tester.hidden_size],
                )


class TFCLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
            # make sure the first token has attention mask `1` to ensure that, after combining the causal mask,
            # there is still at least one token being attended to for each batch.
            # TODO: change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team.
            input_mask = tf.concat(
                [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1
            )

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return CLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFCLIPTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFCLIPTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_inputs_embeds(self):
        # CLIP does not use inputs_embeds
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCLIPTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]

                # Check number of outputs
                self.assertEqual(len(outputs), num_out)

                # Check number of layers
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                # Check hidden states
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # Check attention outputs
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)

                seq_length = self.model_tester.seq_length
                key_length = getattr(self.model_tester, "key_length", seq_length)

                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_length, key_length],
                )


class TFCLIPModelTester:
    def __init__(self, parent, is_training=True):
        self.parent = parent
        self.text_model_tester = TFCLIPTextModelTester(parent)
        self.vision_model_tester = TFCLIPVisionModelTester(parent)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return CLIPConfig.from_text_vision_configs(
            self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = TFCLIPModel(config)
        result = model(input_ids, pixel_values, attention_mask, training=False)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_tf
class TFCLIPModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCLIPModel,) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFCLIPModel} if is_tf_available() else {}
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCLIPModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # hidden_states are tested in individual model tests
    def test_hidden_states_output(self):
        pass

    # input_embeds are tested in individual model tests
    def test_inputs_embeds(self):
        pass

    # CLIPModel does not have input/output embeddings
    def test_model_common_attributes(self):
        pass

    # overwrite from common since `TFCLIPModelTester` set `return_loss` to `True` and causes the preparation of
    # `symbolic_inputs` failed.
    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # remove `return_loss` to make code work
        if self.__class__.__name__ == "TFCLIPModelTest":
            inputs_dict.pop("return_loss", None)

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values than in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCLIPModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.")
    @slow
    def test_saved_model_creation(self):
        pass

    @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.")
    @slow
    def test_saved_model_creation_extended(self):
        pass

    @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.")
    @slow
    def test_prepare_serving_output(self):
        pass


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_tf
class TFCLIPModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "openai/clip-vit-base-patch32"
        model = TFCLIPModel.from_pretrained(model_name)
        processor = CLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf"
        )

        outputs = model(**inputs, training=False)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        expected_logits = tf.constant([[24.5701, 19.3049]])
        tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CLIPSeg model. """

import inspect
import os
import tempfile
import unittest

import numpy as np
import requests

import transformers
from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    is_flax_available,
    is_pt_flax_cross_test,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel
    from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


if is_flax_available():
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )


class CLIPSegVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return CLIPSegVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = CLIPSegVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CLIPSeg does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (CLIPSegVisionModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = CLIPSegVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="CLIPSeg does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPSegVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class CLIPSegTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return CLIPSegTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = CLIPSegTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (CLIPSegTextModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        self.model_tester = CLIPSegTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="CLIPSeg does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPSegTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


class CLIPSegModelTester:
    def __init__(
        self,
        parent,
        text_kwargs=None,
        vision_kwargs=None,
        is_training=True,
        # This should respect the `num_hidden_layers` in `CLIPSegVisionModelTester`
        extract_layers=1,
    ):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = CLIPSegTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = CLIPSegVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training
        self.extract_layers = extract_layers

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, attention_mask, pixel_values

    def get_config(self):
        return CLIPSegConfig.from_text_vision_configs(
            self.text_model_tester.get_config(),
            self.vision_model_tester.get_config(),
            projection_dim=64,
            reduce_dim=32,
            extract_layers=[self.extract_layers],
        )

    def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
        model = CLIPSegModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_mask, pixel_values):
        model = CLIPSegForImageSegmentation(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.vision_model_tester.batch_size,
                self.vision_model_tester.image_size,
                self.vision_model_tester.image_size,
            ),
        )
        self.parent.assertEqual(
            result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


@require_torch
class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # CLIPSegForImageSegmentation requires special treatment
        if return_labels:
            if model_class.__name__ == "CLIPSegForImageSegmentation":
                batch_size, _, height, width = inputs_dict["pixel_values"].shape
                inputs_dict["labels"] = torch.zeros(
                    [batch_size, height, width], device=torch_device, dtype=torch.float
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = CLIPSegModelTester(self)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs)

    @unittest.skip(reason="hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="CLIPSegModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # override as some parameters require custom initialization
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if "logit_scale" in name:
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    elif "film" in name or "transposed_conv" in name or "reduce" in name:
                        # those parameters use PyTorch' default nn.Linear initialization scheme
                        pass
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # CLIPSeg needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_file_name = os.path.join(tmpdirname, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_load_vision_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save CLIPSegConfig and check if we can load CLIPSegVisionConfig from it
        with tempfile.TemporaryDirectory() as tmpdirname:
            config.save_pretrained(tmpdirname)
            vision_config = CLIPSegVisionConfig.from_pretrained(tmpdirname)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save CLIPSegConfig and check if we can load CLIPSegTextConfig from it
        with tempfile.TemporaryDirectory() as tmpdirname:
            config.save_pretrained(tmpdirname)
            text_config = CLIPSegTextConfig.from_pretrained(tmpdirname)
            self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())

    # overwrite from common since FlaxCLIPSegModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # load PyTorch class
                pt_model = model_class(config).eval()
                # Flax models don't use the `use_cache` option and cache is not returned as a default.
                # So we disable `use_cache` here for PyTorch model.
                pt_model.config.use_cache = False

                fx_model_class_name = "Flax" + model_class.__name__

                if not hasattr(transformers, fx_model_class_name):
                    return

                fx_model_class = getattr(transformers, fx_model_class_name)

                # load Flax class
                fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure only flax inputs are forward that actually exist in function args
                fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()

                # prepare inputs
                pt_inputs = self._prepare_for_class(inputs_dict, model_class)

                # remove function args that don't exist in Flax
                pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                # convert inputs to Flax
                fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)}
                fx_outputs = fx_model(**fx_inputs).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

    # overwrite from common since FlaxCLIPSegModel returns nested output
    # which is not supported in the common test
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # load corresponding PyTorch class
                pt_model = model_class(config).eval()

                # So we disable `use_cache` here for PyTorch model.
                pt_model.config.use_cache = False

                fx_model_class_name = "Flax" + model_class.__name__

                if not hasattr(transformers, fx_model_class_name):
                    # no flax model exists for this class
                    return

                fx_model_class = getattr(transformers, fx_model_class_name)

                # load Flax class
                fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure only flax inputs are forward that actually exist in function args
                fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                # prepare inputs
                pt_inputs = self._prepare_for_class(inputs_dict, model_class)

                # remove function args that don't exist in Flax
                pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)}

                fx_outputs = fx_model(**fx_inputs).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                    self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            print("Model class:", model_class)

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            for k, v in inputs.items():
                print(k, v.shape)
            loss = model(**inputs).loss
            loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CLIPSegModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@require_vision
@require_torch
class CLIPSegModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation(self):
        model_name = "CIDAS/clipseg-rd64-refined"
        processor = CLIPSegProcessor.from_pretrained(model_name)
        model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device)

        image = prepare_img()
        texts = ["a cat", "a remote", "a blanket"]
        inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the predicted masks
        self.assertEqual(
            outputs.logits.shape,
            torch.Size((3, 352, 352)),
        )
        expected_masks_slice = torch.tensor(
            [[-7.4613, -7.4785, -7.3628], [-7.3268, -7.0899, -7.1333], [-6.9838, -6.7900, -6.8913]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3))

        # verify conditional and pooled output
        expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980]).to(torch_device)
        expected_pooled_output = torch.tensor([0.5036, -0.2681, -0.2644]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3))
        self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
image of cute cats forward pass verify the predicted masks verify conditional and pooled output
import inspect import os import tempfile import unittest import numpy as np import requests import transformers from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import ( is_flax_available, is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) class CLIPSegVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPSegVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPSegVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegVisionModel,) if 
is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = CLIPSegVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices =
np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPSegTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = CLIPSegTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = CLIPSegTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, is_training=True, extract_layers=(1,), ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = CLIPSegTextModelTester(parent, **text_kwargs) self.vision_model_tester = CLIPSegVisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.extract_layers = extract_layers def
prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPSegConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64, reduce_dim=32, extract_layers=self.extract_layers, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = CLIPSegModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_mask, pixel_values): model = CLIPSegForImageSegmentation(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.vision_model_tester.image_size, self.vision_model_tester.image_size, ), ) self.parent.assertEqual( result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels: if model_class.__name__ == "CLIPSegForImageSegmentation": batch_size, _, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [batch_size, height, width], device=torch_device, dtype=torch.float ) return inputs_dict def setUp(self): self.model_tester = CLIPSegModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPSegModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC,
check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "logit_scale" in name: self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "film" in name or "transposed_conv" in name or "reduce" in name: pass else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = CLIPSegVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(),
vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = CLIPSegTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return fx_model_class = getattr(transformers, fx_model_class_name) fx_model = fx_model_class(config, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return fx_model_class = getattr(transformers, fx_model_class_name) fx_model = fx_model_class(config, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) pt_model.tie_weights() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), 
"Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) for k, v in inputs.items(): print(k, v.shape) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPSegModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class CLIPSegModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation(self): model_name = "CIDAS/clipseg-rd64-refined" processor = CLIPSegProcessor.from_pretrained(model_name) model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device) image = prepare_img() texts = ["a cat", "a remote", "a blanket"] inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) self.assertEqual( outputs.logits.shape, torch.Size((3, 352, 352)), ) expected_masks_slice = torch.tensor( [[-7.4613, -7.4785, -7.3628], [-7.3268, -7.0899, -7.1333], [-6.9838, -6.7900, -6.8913]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3)) expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980]).to(torch_device) expected_pooled_output = torch.tensor([0.5036, -0.2681, -0.2644]).to(torch_device) self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3)) self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc import itertools import os import random import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from transformers import ClvpFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class ClvpFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.feature_size = feature_size self.chunk_length = chunk_length self.hop_length = hop_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch class ClvpFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ClvpFeatureExtractor def setUp(self): self.feat_extract_tester = ClvpFeatureExtractionTester(self) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() 
dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=22050)) speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] @slow def test_integration(self): EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.9271, 1.1405, 1.4419, 1.2470, 1.2438, 
1.1787, 1.0595, 1.0570, 1.1070, 1.2205, 1.2376, 1.2997, 1.1131, 1.0843, 1.0459, 1.1858, 1.2323, 1.3582, 1.3401, 1.3770, 1.4173, 1.3381, 1.2291, 1.0854, 1.2116, 1.1873, 1.2178, 1.2137, 1.3001, 1.4274 ] ) input_speech, sr = self._load_datasamples(1) feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 80, 517)) self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
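As a hedged sketch of how the feature extractor exercised above can be driven end to end on synthetic audio (illustrative only; the explicit kwargs mirror ClvpFeatureExtractionTester's defaults so the mel dimension is predictable, and the 0.4 s waveform length is an arbitrary choice since the extractor pads or truncates internally):

import numpy as np

from transformers import ClvpFeatureExtractor

# Mirror the tester's configuration explicitly so the resulting mel dimension is known up front.
extractor = ClvpFeatureExtractor(
    feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000
)

waveform = np.random.rand(1_600).astype(np.float32)  # 0.4 s of random audio at 4 kHz
features = extractor(waveform, sampling_rate=4_000, return_tensors="np").input_features
# features.shape == (1, 10, num_frames): the 10 rows are the mel bins (feature_size above)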
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Clvp model. """

import gc
import tempfile
import unittest

import datasets
import numpy as np

from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration
    from transformers.models.clvp.modeling_clvp import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST

from transformers import ClvpFeatureExtractor, ClvpTokenizer


class ClvpEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_labels=True,
        vocab_size=50,
        hidden_size=128,
        projection_dim=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=32,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1

    def get_config(self):
        encoder_config = ClvpEncoderConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
        )

        return encoder_config

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        encoder_config = self.get_config()

        return encoder_config, input_ids, input_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        speech_config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)}
        return speech_config, inputs_dict

    def create_and_check_model(self, speech_config, input_ids, input_mask):
        text_config = ClvpEncoderConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )
        text_encoder_model = ClvpEncoder(config=text_config)
        text_encoder_model.to(torch_device)
        text_encoder_model.eval()
        with torch.no_grad():
            result = text_encoder_model(input_ids, attention_mask=input_mask)
            result = text_encoder_model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))

        # now check with speech config
        speech_encoder_model = ClvpEncoder(config=speech_config)
        speech_encoder_model.to(torch_device)
        speech_encoder_model.eval()
        with torch.no_grad():
            result = speech_encoder_model(input_ids, attention_mask=input_mask)
            result = speech_encoder_model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))


@require_torch
class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpEncoder,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = ClvpEncoderTester(self)
        self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.encoder_config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="ClvpEncoder does not output loss")
    def test_training(self):
        pass

    @unittest.skip(reason="ClvpEncoder does not output loss")
    def test_training_gradient_checkpointing(self):
        pass


class ClvpDecoderTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=3,
        is_training=False,
        vocab_size=300,
        max_position_embeddings=256,
        max_text_tokens=256,
        use_input_mask=True,
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=2,
        bos_token_id=97,
        eos_token_id=98,
        relative_attention_num_buckets=4,
        relative_attention_max_distance=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.max_text_tokens = max_text_tokens
        self.use_input_mask = use_input_mask
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

    def get_config(self):
        decoder_config = ClvpDecoderConfig(
            vocab_size=self.vocab_size,
            max_position_embeddings=self.max_position_embeddings,
            max_text_tokens=self.max_text_tokens,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            relative_attention_max_distance=self.relative_attention_max_distance,
        )

        return decoder_config

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        decoder_config = self.get_config()

        return decoder_config, input_ids, input_mask

    def create_and_check_model(self, config, input_ids, attention_mask):
        model = ClvpForCausalLM(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids=input_ids, attention_mask=attention_mask)

        self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": attention_mask.to(torch_device)}
        return config, inputs_dict


@require_torch
class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {}

    test_pruning = False

    def setUp(self):
        self.model_tester = ClvpDecoderTester(self)
        self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        if return_labels and model_class == ClvpForCausalLM:
            inputs_dict["labels"] = torch.zeros(
                [self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device
            ).long()

        return inputs_dict

    def test_training(self):
        # we will only test the ClvpForCausalLM since it outputs loss
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        model = ClvpForCausalLM(config)
        model.to(torch_device)
        model.train()
        inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)
        loss = model(**inputs).loss
        loss.backward()

    def test_training_gradient_checkpointing(self):
        # we will only test the ClvpForCausalLM since it outputs loss
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.use_cache = False
        config.return_dict = True

        model = ClvpForCausalLM(config)
        model.to(torch_device)
        model.gradient_checkpointing_enable()
        model.train()
        inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)

        loss = model(**inputs).loss
        loss.backward()


class ClvpModelForConditionalGenerationTester:
    def __init__(self, parent, is_training=False):
        self.parent = parent
        self.clvp_encoder_tester = ClvpEncoderTester(parent)
        self.is_training = is_training

    def get_config(self):
        decoder_config = ClvpDecoderConfig(
            vocab_size=50,
            max_position_embeddings=30,
            max_text_tokens=30,
            hidden_size=128,
            num_hidden_layers=1,
            num_attention_heads=2,
            bos_token_id=97,
            eos_token_id=98,
            relative_attention_num_buckets=4,
            relative_attention_max_distance=16,
        )
        text_config = self.clvp_encoder_tester.get_config()
        speech_config = self.clvp_encoder_tester.get_config()
        speech_config.vocab_size = 300

        return ClvpConfig.from_sub_model_configs(
            text_config,
            speech_config,
            decoder_config,
            projection_dim=16,
        )

    def prepare_config_and_inputs(self):
        _, input_ids, attention_mask = self.clvp_encoder_tester.prepare_config_and_inputs()

        ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()

        feature_extractor = ClvpFeatureExtractor()
        input_features = feature_extractor(raw_speech=audio, sampling_rate=sr, return_tensors="pt")[
            "input_features"
        ].to(torch_device)

        config = self.get_config()

        return config, input_ids, attention_mask, input_features

    def create_and_check_model(self, config, input_ids, attention_mask, input_features):
        model = ClvpModelForConditionalGeneration(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask)

        self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size))
        self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, input_features = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids.to(torch_device),
            "attention_mask": attention_mask.to(torch_device),
            "input_features": input_features.to(torch_device),
            "return_loss": False,
        }
        return config, inputs_dict


@require_torch
class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else ()

    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = ClvpModelForConditionalGenerationTester(self)
        self.clvp_config_tester = ConfigTester(self, config_class=ClvpConfig, hidden_size=32)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            # check for decoder model, text encoder model and speech encoder model hidden states
            decoder_hidden_states = outputs.decoder_hidden_states
            text_encoder_hidden_states = outputs.text_encoder_hidden_states
            speech_encoder_hidden_states = outputs.speech_encoder_hidden_states

            # check length of the hidden states
            expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1
            self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers)

            expected_speech_encoder_num_layers = config.text_config.num_hidden_layers + 1
            self.assertEqual(len(text_encoder_hidden_states), expected_speech_encoder_num_layers)

            expected_text_encoder_num_layers = config.speech_config.num_hidden_layers + 1
            self.assertEqual(len(speech_encoder_hidden_states), expected_text_encoder_num_layers)

            # check shapes of each hidden state

            # for the decoder model, we will only test the dimension because the ClvpConditioningEncoder could
            # increase the sequence lengths.
            self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size)

            # the testing for text encoder stays standard because we just pass the text tokens here.
            self.assertListEqual(
                list(text_encoder_hidden_states[0].shape[-2:]),
                [self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size],
            )

            # for the decoder model, we will only test the dimension because the fix_decoder_outputs method could
            # increase the sequence lengths by adding decoder_fixing_codes tokens at the end.
            self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
    def test_model_common_attributes(self):
        pass

    # override as the `logit_scale` parameter initialization is different for Clvp
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        expected_value = np.log(1 / 0.07)
                        returned_value = param.data.item()

                        self.assertAlmostEqual(
                            returned_value,
                            expected_value,
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        expected_range = [0.0, 1.0]
                        returned_range = ((param.data.mean() * 1e9).round() / 1e9).item()

                        self.assertIn(
                            returned_range,
                            expected_range,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def test_load_speech_text_decoder_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save ClvpConfig and check if we can load ClvpEncoderConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict())

        # Save ClvpConfig and check if we can load ClvpDecoderConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        for model_name in CLVP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ClvpModelForConditionalGeneration.from_pretrained(model_name)
            self.assertIsNotNone(model)


# Since Clvp has a lot of different models connected with each other, it's better to test each of them individually
# along with a test_full_model_integration. If the model breaks in future, it could be of a great help to identify
# the broken part.


@slow
@require_torch
class ClvpIntegrationTest(unittest.TestCase):
    def setUp(self):
        self.text = "This is an example text."
        ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        _, self.speech_samples, self.sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()

        self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device)
        self.model.eval()
        tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
        feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")

        tokenizer_output = tokenizer(self.text, return_tensors="pt")
        self.text_tokens = tokenizer_output["input_ids"].to(torch_device)
        self.input_features = feature_extractor(
            raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt"
        )["input_features"].to(torch_device)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_conditional_encoder(self):
        with torch.no_grad():
            conditioning_encoder_outputs = self.model.conditioning_encoder(
                input_features=self.input_features, input_ids=self.text_tokens
            ).to("cpu")

        self.assertEqual(
            conditioning_encoder_outputs.shape,
            torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)),
        )

        EXPECTED_OUTPUTS = torch.tensor(
            [[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]]
        )

        self.assertTrue(torch.allclose(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4))

    def test_decoder_model_generate(self):
        autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu()

        EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]])

        self.assertTrue(torch.allclose(autoregressive_model_output, EXPECTED_OUTPUTS))

    def test_text_and_speech_encoder_models(self):
        # check for text embeds
        text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()

        # fmt: off
        EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676])
        # fmt: on

        self.assertTrue(torch.allclose(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, atol=1e-4))

        # check for speech embeds
        speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()

        # fmt: off
        EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, -6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821])
        # fmt: on

        self.assertTrue(torch.allclose(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, atol=1e-4))

    def test_full_model_integration(self):
        full_model_output = self.model.generate(
            input_ids=self.text_tokens,
            input_features=self.input_features,
            do_sample=False,
            num_beams=4,
            num_return_sequences=4,
            max_new_tokens=10,
        )

        EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]])
        EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]])

        self.assertTrue(torch.allclose(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS))
        self.assertTrue(torch.allclose(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES))
import gc import tempfile import unittest import datasets import numpy as np from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig from transformers.testing_utils import ( require_torch, slow, torch_device, ) from transformers.utils import is_torch_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration from transformers.models.clvp.modeling_clvp import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST from transformers import ClvpFeatureExtractor, ClvpTokenizer class ClvpEncoderTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_input_mask=True, use_labels=True, vocab_size=50, hidden_size=128, projection_dim=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=32, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 def get_config(self): encoder_config = ClvpEncoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, ) return encoder_config def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 encoder_config = self.get_config() return encoder_config, input_ids, input_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() speech_config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)} return speech_config, inputs_dict def create_and_check_model(self, speech_config, input_ids, input_mask): text_config = ClvpEncoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, 
initializer_range=self.initializer_range, ) text_encoder_model = ClvpEncoder(config=text_config) text_encoder_model.to(torch_device) text_encoder_model.eval() with torch.no_grad(): result = text_encoder_model(input_ids, attention_mask=input_mask) result = text_encoder_model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) speech_encoder_model = ClvpEncoder(config=speech_config) speech_encoder_model.to(torch_device) speech_encoder_model.eval() with torch.no_grad(): result = speech_encoder_model(input_ids, attention_mask=input_mask) result = speech_encoder_model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim)) @require_torch class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ClvpEncoder,) if is_torch_available() else () test_pruning = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = ClvpEncoderTester(self) self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_config(self): self.encoder_config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="ClvpEncoder does not output loss") def test_training(self): pass @unittest.skip(reason="ClvpEncoder does not output loss") def test_training_gradient_checkpointing(self): pass class ClvpDecoderTester: def __init__( self, parent, batch_size=2, seq_length=3, is_training=False, vocab_size=300, max_position_embeddings=256, max_text_tokens=256, use_input_mask=True, hidden_size=128, num_hidden_layers=2, num_attention_heads=2, bos_token_id=97, eos_token_id=98, relative_attention_num_buckets=4, relative_attention_max_distance=16, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.max_text_tokens = max_text_tokens self.use_input_mask = use_input_mask self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance def get_config(self): decoder_config = ClvpDecoderConfig( vocab_size=self.vocab_size, max_position_embeddings=self.max_position_embeddings, max_text_tokens=self.max_text_tokens, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, relative_attention_num_buckets=self.relative_attention_num_buckets, relative_attention_max_distance=self.relative_attention_max_distance, ) return decoder_config def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = 
input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 decoder_config = self.get_config() return decoder_config, input_ids, input_mask def create_and_check_model(self, config, input_ids, attention_mask): model = ClvpForCausalLM(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=attention_mask) self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = { "input_ids": input_ids.to(torch_device), "attention_mask": attention_mask.to(torch_device), } return config, inputs_dict @require_torch class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else () all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {} test_pruning = False def setUp(self): self.model_tester = ClvpDecoderTester(self) self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels and model_class == ClvpForCausalLM: inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device ).long() return inputs_dict def test_training(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = ClvpForCausalLM(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = ClvpForCausalLM(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True) loss = model(**inputs).loss loss.backward() class ClvpModelForConditionalGenerationTester: def __init__(self, parent, is_training=False): self.parent = parent self.clvp_encoder_tester = ClvpEncoderTester(parent) self.is_training = is_training def get_config(self): decoder_config = ClvpDecoderConfig( vocab_size=50, max_position_embeddings=30, max_text_tokens=30, hidden_size=128, num_hidden_layers=1, num_attention_heads=2, bos_token_id=97, eos_token_id=98, relative_attention_num_buckets=4, relative_attention_max_distance=16, ) text_config = self.clvp_encoder_tester.get_config() speech_config = self.clvp_encoder_tester.get_config() speech_config.vocab_size = 300 return ClvpConfig.from_sub_model_configs( text_config, speech_config, decoder_config, projection_dim=16, ) def prepare_config_and_inputs(self): _, input_ids, attention_mask = 
self.clvp_encoder_tester.prepare_config_and_inputs() ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() feature_extractor = ClvpFeatureExtractor() input_features = feature_extractor(raw_speech=audio, sampling_rate=sr, return_tensors="pt")[ "input_features" ].to(torch_device) config = self.get_config() return config, input_ids, attention_mask, input_features def create_and_check_model(self, config, input_ids, attention_mask, input_features): model = ClvpModelForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask) self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size)) self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, input_features = config_and_inputs inputs_dict = { "input_ids": input_ids.to(torch_device), "attention_mask": attention_mask.to(torch_device), "input_features": input_features.to(torch_device), "return_loss": False, } return config, inputs_dict @require_torch class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else () test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = ClvpModelForConditionalGenerationTester(self) self.clvp_config_tester = ConfigTester(self, config_class=ClvpConfig, hidden_size=32) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) decoder_hidden_states = outputs.decoder_hidden_states text_encoder_hidden_states = outputs.text_encoder_hidden_states speech_encoder_hidden_states = outputs.speech_encoder_hidden_states expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1 self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers) expected_speech_encoder_num_layers = config.text_config.num_hidden_layers + 1 self.assertEqual(len(text_encoder_hidden_states), expected_speech_encoder_num_layers) expected_text_encoder_num_layers = config.speech_config.num_hidden_layers + 1 self.assertEqual(len(speech_encoder_hidden_states), expected_text_encoder_num_layers) self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size) self.assertListEqual( list(text_encoder_hidden_states[0].shape[-2:]), [self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size], ) self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True 
check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": expected_value = np.log(1 / 0.07) returned_value = param.data.item() self.assertAlmostEqual( returned_value, expected_value, delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: expected_range = [0.0, 1.0] returned_range = ((param.data.mean() * 1e9).round() / 1e9).item() self.assertIn( returned_range, expected_range, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_load_speech_text_decoder_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in CLVP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClvpModelForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch class ClvpIntegrationTest(unittest.TestCase): def setUp(self): self.text = "This is an example text." 
ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) _, self.speech_samples, self.sr = ds.sort("id").select(range(1))[:1]["audio"][0].values() self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device) self.model.eval() tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev") feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") tokenizer_output = tokenizer(self.text, return_tensors="pt") self.text_tokens = tokenizer_output["input_ids"].to(torch_device) self.input_features = feature_extractor( raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt" )["input_features"].to(torch_device) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_conditional_encoder(self): with torch.no_grad(): conditioning_encoder_outputs = self.model.conditioning_encoder( input_features=self.input_features, input_ids=self.text_tokens ).to("cpu") self.assertEqual( conditioning_encoder_outputs.shape, torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)), ) EXPECTED_OUTPUTS = torch.tensor( [[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]] ) self.assertTrue(torch.allclose(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4)) def test_decoder_model_generate(self): autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu() EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]]) self.assertTrue(torch.allclose(autoregressive_model_output, EXPECTED_OUTPUTS)) def test_text_and_speech_encoder_models(self): text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676]) self.assertTrue(torch.allclose(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, atol=1e-4)) speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu() EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, -6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821]) self.assertTrue(torch.allclose(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, atol=1e-4)) def test_full_model_integration(self): full_model_output = self.model.generate( input_ids=self.text_tokens, input_features=self.input_features, do_sample=False, num_beams=4, num_return_sequences=4, max_new_tokens=10, ) EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]]) EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]]) self.assertTrue(torch.allclose(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS)) self.assertTrue(torch.allclose(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES))
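For orientation, here is a small self-contained sketch (not part of the test suite) of the CLIP-style similarity computation whose output shapes the conditional-generation tests above assert: CLVP scores each speech candidate against each text, and logits_per_text is the transpose of logits_per_speech, which is consistent with the mirrored (2, batch_size) and (batch_size, 2) shapes checked in create_and_check_model. The tensor values here are toy placeholders, not encoder outputs.

import torch

# Toy stand-ins for the text / speech encoder projections
# (batch of 2 per modality, projection_dim=16 as in the tester above).
text_embeds = torch.nn.functional.normalize(torch.randn(2, 16), dim=-1)
speech_embeds = torch.nn.functional.normalize(torch.randn(2, 16), dim=-1)

# test_initialization expects the logit_scale parameter to hold log(1 / 0.07);
# the scale applied to the similarities is its exponential.
scale = torch.log(torch.tensor(1 / 0.07)).exp()

logits_per_speech = scale * speech_embeds @ text_embeds.t()  # (num_speech, num_text)
logits_per_text = logits_per_speech.t()                      # (num_text, num_speech)
assert logits_per_speech.shape == (2, 2)
assert logits_per_text.shape == (2, 2)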
import gc import shutil import tempfile import unittest from transformers import ClvpFeatureExtractor, ClvpProcessor, ClvpTokenizer from transformers.testing_utils import require_torch from .test_feature_extraction_clvp import floats_list @require_torch class ClvpProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "susnato/clvp_dev" self.tmpdirname = tempfile.mkdtemp() def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdirname) gc.collect() def get_tokenizer(self, **kwargs): return ClvpTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return ClvpFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = ClvpProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, ClvpTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_save_load_pretrained_additional_features(self): processor = ClvpProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(pad_token="(PAD)") feature_extractor_add_kwargs = self.get_feature_extractor(sampling_rate=16000) processor = ClvpProcessor.from_pretrained( self.tmpdirname, pad_token="(PAD)", sampling_rate=16000, ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, ClvpTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = 
ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( sorted(processor.model_input_names), sorted(set(feature_extractor.model_input_names + tokenizer.model_input_names)), msg="`processor` and `feature_extractor` model input names do not match", )
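As a quick illustration of the routing exercised by the processor tests above, a minimal usage sketch follows; it assumes the "susnato/clvp_dev" checkpoint used throughout these tests is reachable and that ClvpProcessor.from_pretrained can assemble the tokenizer and feature extractor from it (the tests construct the processor explicitly instead). Passing text= goes through the tokenizer, raw_speech= through the feature extractor, and batch_decode delegates to the tokenizer.

from transformers import ClvpProcessor

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")

encoded_text = processor(text="This is a test string")                      # tokenizer path
encoded_audio = processor(raw_speech=[[0.1] * 1000], return_tensors="np")  # feature-extractor path
decoded = processor.batch_decode([[1, 4, 5, 8, 1, 0, 8]])                  # delegates to the tokenizer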
import datetime import unittest from transformers import CodeGenConfig, is_torch_available from transformers.file_utils import cached_property from transformers.testing_utils import backend_manual_seed, is_flaky, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, AutoTokenizer, CodeGenForCausalLM, CodeGenModel class CodeGenModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=256, hidden_size=32, rotary_dim=4, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.rotary_dim = rotary_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return CodeGenConfig.from_pretrained("Salesforce/codegen-2B-mono") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return CodeGenConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_codegen_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CodeGenModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_codegen_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CodeGenModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_codegen_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = CodeGenModel(config=config) model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = 
random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_codegen_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = CodeGenModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CodeGenForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = CodeGenForCausalLM(config) if gradient_checkpointing: model.gradient_checkpointing_enable() model.to(torch_device) result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask} return config, inputs_dict @require_torch class CodeGenModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, 
unittest.TestCase): all_model_classes = (CodeGenModel, CodeGenForCausalLM) if is_torch_available() else () all_generative_model_classes = (CodeGenForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": CodeGenModel, "text-generation": CodeGenForCausalLM} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_missing_keys = False test_model_parallel = False test_head_masking = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = CodeGenModelTester(self) self.config_tester = ConfigTester(self, config_class=CodeGenConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_codegen_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_codegen_model(*config_and_inputs) def test_codegen_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_codegen_model_past(*config_and_inputs) def test_codegen_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_codegen_model_attention_mask_past(*config_and_inputs) def test_codegen_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_codegen_model_past_large_inputs(*config_and_inputs) def test_codegen_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_codegen_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) @slow def test_batch_generation(self): tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono") model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono") model.to(torch_device) tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id sentences = ["def hellow_world():", "def greet(name):"] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = 
tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ 'def hellow_world():\n print("Hello World")\n\nhellow_world()', 'def greet(name):\n print(f"Hello {name}")\n\ng', ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CodeGenModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class CodeGenModelLanguageGenerationTest(unittest.TestCase): @cached_property def cached_tokenizer(self): return AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono") @cached_property def cached_model(self): return CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono") @slow def test_lm_generate_codegen(self): tokenizer = self.cached_tokenizer for checkpointing in [True, False]: model = self.cached_model if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) inputs = tokenizer("def hello_world():", return_tensors="pt").to(torch_device) expected_output = 'def hello_world():\n print("Hello World")\n\nhello_world()\n\n' output_ids = model.generate(**inputs, do_sample=False) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, expected_output) @slow def test_codegen_sample(self): tokenizer = self.cached_tokenizer model = self.cached_model model.to(torch_device) torch.manual_seed(0) backend_manual_seed(torch_device, 0) tokenized = tokenizer("def hello_world():", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) output_ids = model.generate(input_ids, do_sample=True) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) token_type_ids = tokenized.token_type_ids.to(torch_device) output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5) output_seq_tt = model.generate( input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5 ) output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True) output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) if torch_device == "cuda": EXPECTED_OUTPUT_STR = 'def hello_world():\n print("Hello World")\n return True\n\nresult =' else: EXPECTED_OUTPUT_STR = "def hello_world():\r\n print('Hello, World.')\r\n\r\n\r" self.assertEqual(output_str, EXPECTED_OUTPUT_STR) self.assertTrue( all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs))) ) @is_flaky(max_attempts=3, description="measure of timing is somehow flaky.") @slow def test_codegen_sample_max_time(self): tokenizer = self.cached_tokenizer model = self.cached_model model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) MAX_TIME = 0.05 start = datetime.datetime.now() model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=2 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, 
do_sample=False, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=2 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=2 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=2 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=2 * MAX_TIME))
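One detail worth calling out from test_batch_generation above: decoder-only models such as CodeGen must be padded on the left for batched generation, so that the last position of every row is a real token and generation does not continue from padding. A minimal sketch of that setup, using the same checkpoint the test uses:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
tokenizer.padding_side = "left"            # decoder-only models are padded on the left
tokenizer.pad_token = tokenizer.eos_token  # CodeGen defines no dedicated pad token

batch = tokenizer(["def hellow_world():", "def greet(name):"], return_tensors="pt", padding=True)
# batch["attention_mask"] marks the left padding that generate() must skip over.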
import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class ConditionalDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ConditionalDetrImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ConditionalDetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) @slow def test_call_pytorch_with_coco_detection_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") image_processing = ConditionalDetrImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) 
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
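
# A minimal standalone sketch (not the library implementation) of the resize arithmetic
# that `get_expected_values` above mirrors: with size = {"shortest_edge": s, ...}, the
# shorter image side is scaled to `s` and the other side follows the aspect ratio,
# truncated to int. The helper name `expected_resized_size` is hypothetical, introduced
# only for illustration; the `longest_edge` clamp is ignored here because the tester sets
# it high enough (1333) that it never triggers.
def expected_resized_size(height, width, shortest_edge):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# Sanity check against the slow COCO tests above: a 480x640 image resized with
# shortest_edge=800 yields the asserted pixel_values shape (1, 3, 800, 1066).
assert expected_resized_size(480, 640, shortest_edge=800) == (800, 1066)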
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Conditional DETR model. """
#
# The tester below also sets the expected seq length for both encoder and decoder.
# `labels` is a list of dicts, each dict holding the labels for one example in the batch.
# _prepare_for_class is a special case for the head models.
# TODO: check whether test_multi_gpu_data_parallel_forward works again for PyTorch 2.x.y.
# TODO (Niels): fix test_model_outputs_equivalence.
# test_attention_outputs also checks that output_attentions works when set via config,
# accounts for the loss being added at the first position when labels are passed, for the
# extra pred_logits/pred_boxes outputs of the object detection model and the extra
# pred_logits/pred_boxes/pred_masks outputs of the panoptic segmentation model, and
# checks that attentions are always last and ordered correctly.
# The integration tests verify logits, box predictions and postprocessing.
import inspect import math import unittest from transformers import ConditionalDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ) if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class ConditionalDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return ConditionalDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, 
inputs_dict def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConditionalDetrModel, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = ConditionalDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_conditional_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) def test_conditional_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Conditional DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Conditional DETR does not have a get_input_embeddings method") def 
test_model_common_attributes(self): pass @unittest.skip(reason="Conditional DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Conditional DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 6 if "labels" in inputs_dict: correct_outlen += 1 if model_class.__name__ == "ConditionalDetrForObjectDetection": correct_outlen += 1 if model_class.__name__ == "ConditionalDetrForSegmentation": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def 
test_retain_grad_hidden_states_attentions(self):
        # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()
        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()
        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            if model.config.is_encoder_decoder:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "encoder_outputs"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_different_timm_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # let's pick a random timm backbone
        config.backbone = "tf_mobilenetv3_small_075"
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if model_class.__name__ == "ConditionalDetrForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
            self.assertTrue(outputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.init_xavier_std = 1e9
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if "bbox_attention" in name and "bias" not in name:
                        self.assertLess(
                            100000,
                            abs(param.data.max().item()),
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_timm
@require_vision
@slow
class ConditionalDetrModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device)
        image_processor =
self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 300, 256)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.4222, 0.7471, 0.8760], [0.6395, -0.2729, 0.7127], [-0.3090, 0.7642, 0.9529]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-10.4372, -5.7558, -8.6764], [-10.5410, -5.8704, -8.0590], [-10.6827, -6.3469, -8.3923]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.7733, 0.6576, 0.4496], [0.5171, 0.1184, 0.9094], [0.8846, 0.5647, 0.2486]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355]).to(torch_device) expected_labels = [75, 17, 17, 75, 63] expected_slice_boxes = torch.tensor([38.3089, 72.1022, 177.6293, 118.4512]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
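
# A minimal sketch, assuming the tester defaults above (min_size = max_size = 200,
# num_queries = 12), of why ConditionalDetrModelTester sets
# encoder_seq_length = ceil(min_size / 32) * ceil(max_size / 32): the ResNet backbone
# downsamples by a total stride of 32, and every spatial position of the final feature
# map becomes one encoder token. Illustration only, not part of the test suite.
import math

min_size, max_size, num_queries = 200, 200, 12
feature_map_h = math.ceil(min_size / 32)  # 7
feature_map_w = math.ceil(max_size / 32)  # 7
encoder_seq_length = feature_map_h * feature_map_w  # 49 encoder tokens
decoder_seq_length = num_queries  # one decoder token per object query
assert (encoder_seq_length, decoder_seq_length) == (49, 12)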
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvBERT model. """
#
# test_attention_outputs below also checks that output_attentions works when set via
# config, accounts for the loss being added at the first position when labels are
# passed and for question answering models returning start_logits and end_logits
# instead of a single output, and checks that attentions are always last and ordered.
# ConvBertForMultipleChoice behaves incorrectly in JIT environments, so the
# torchscript device-change test skips it.
import os import tempfile import unittest from transformers import ConvBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.convbert.modeling_convbert import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST class ConvBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = ConvBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, 
labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvBertModel, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ConvBertModel, "fill-mask": ConvBertForMaskedLM, "question-answering": ConvBertForQuestionAnswering, "text-classification": ConvBertForSequenceClassification, "token-classification": ConvBertForTokenClassification, "zero-shot": ConvBertForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_head_masking = False def setUp(self): self.model_tester = ConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ConvBertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) 
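            # First forward pass requests attentions through the inputs dict; a second
            # pass below deletes that key and sets config.output_attentions instead,
            # checking that both paths agree on ConvBert's head_ratio-halved head count.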
model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class == ConvBertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, 
"traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) def test_model_for_input_embeds(self): batch_size = 2 seq_length = 10 inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device) config = self.model_tester.get_config() model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(inputs_embeds=inputs_embeds) self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size)) def test_reducing_attention_heads(self): config, *inputs_dict = self.model_tester.prepare_config_and_inputs() config.head_ratio = 4 self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict) @require_torch class ConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = ConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class TFConvBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 384 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.embedding_size = 128 self.head_ratio = 2 self.conv_kernel_size = 9 self.num_groups = 1 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def 
create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFConvBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFConvBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFConvBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": 
TFConvBertForSequenceClassification, } if is_tf_available() else {} ) test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) if self.is_encoder_decoder: output_hidden_states = outputs["encoder_hidden_states"] output_attentions = outputs["encoder_attentions"] else: output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow def test_model_from_pretrained(self): model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base") self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", 
self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(out_len % 2, 0) decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) @require_tf class TFConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 768] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
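`test_saved_model_creation_extended` above exercises the full Keras SavedModel round trip with hidden states and attentions baked into the serialized signature. A condensed sketch of that flow, assuming a small randomly initialized config that mirrors the tester's hardcoded sizes rather than a released checkpoint:

import os
import tempfile

import tensorflow as tf

from transformers import ConvBertConfig, TFConvBertModel

config = ConvBertConfig(
    vocab_size=99, hidden_size=384, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
config.output_hidden_states = True
model = TFConvBertModel(config)
input_ids = tf.constant([[1, 2, 3, 4, 5, 6]])
num_out = len(model({"input_ids": input_ids}))

with tempfile.TemporaryDirectory() as tmpdirname:
    model.save_pretrained(tmpdirname, saved_model=True)
    loaded = tf.keras.models.load_model(os.path.join(tmpdirname, "saved_model", "1"))
    outputs = loaded({"input_ids": input_ids})
    # The serialized signature preserves the same set of outputs.
    assert len(outputs) == num_out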
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ConvNextImageProcessor class ConvNextImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, crop_pct=0.875, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"shortest_edge": 20} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.crop_pct = crop_pct self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_pct": self.crop_pct, } def expected_output_image_shape(self, images): return self.num_channels, self.size["shortest_edge"], self.size["shortest_edge"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ConvNextImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ConvNextImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ConvNextImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "crop_pct")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42})
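For context on the `crop_pct` knob tested above: when `shortest_edge` is below 384, ConvNextImageProcessor first resizes the short side to `int(shortest_edge / crop_pct)` and then center-crops back to `shortest_edge`, which is what `expected_output_image_shape` encodes. A small usage sketch under the same configuration (random input, shapes only):

import numpy as np

from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 20}, crop_pct=0.875)
# A single channels-first image within the tester's min/max resolution bounds.
image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)
pixel_values = processor(images=image, return_tensors="np").pixel_values
# Resized to a short side of int(20 / 0.875) == 22, then center-cropped to 20x20.
assert pixel_values.shape == (1, 3, 20, 20)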
# Testing suite for the PyTorch ConvNext model.
import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class ConvNextModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_labels = num_labels self.initializer_range = initializer_range self.out_features = out_features self.out_indices = out_indices self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = ConvNextModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = ConvNextForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) config.out_features = None model = ConvNextBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 
1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = ConvNextModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ConvNext does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="ConvNext does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
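The backbone contract checked in `create_and_check_backbone` above fits in a few lines: `out_features` picks which stage activations come back as `feature_maps`, and `model.channels` mirrors their widths. A minimal sketch with random weights (no released checkpoint involved):

import torch

from transformers import ConvNextBackbone, ConvNextConfig

config = ConvNextConfig(
    hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], out_features=["stage2", "stage3", "stage4"]
)
model = ConvNextBackbone(config)
model.eval()
with torch.no_grad():
    feature_maps = model(torch.rand(1, 3, 32, 32)).feature_maps
# One map per requested stage; widths follow config.hidden_sizes[1:].
assert len(feature_maps) == 3
assert model.channels == [20, 30, 40]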
# Testing suite for the TensorFlow ConvNext model.
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import ConvNextConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFConvNextForImageClassification, TFConvNextModel if is_vision_available(): from PIL import Image from transformers import ConvNextImageProcessor class TFConvNextModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFConvNextModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFConvNextForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFConvNextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFConvNextModel, TFConvNextForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFConvNextModel, "image-classification": TFConvNextForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = TFConvNextModelTester(self) self.config_tester = ConfigTester( self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37, ) 
@unittest.skip(reason="ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): super().test_keras_fit() @unittest.skip(reason="ConvNext does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. 
Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224") self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFConvNextModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0260, -0.4739, 0.1911]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ConvNextV2 model."""
import unittest from transformers import ConvNextV2Config from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class ConvNextV2ModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_labels = num_labels self.initializer_range = initializer_range self.out_features = out_features self.out_indices = out_indices self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextV2Config( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = ConvNextV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = ConvNextV2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = ConvNextV2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) config.out_features = None model = 
ConvNextV2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_with_labels(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvNextV2Model, ConvNextV2ForImageClassification, ConvNextV2Backbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = ConvNextV2ModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels() config.return_dict = True if model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels() config.use_cache = False config.return_dict = True if ( model_class.__name__ in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] or not model_class.supports_gradient_checkpointing ): continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() 
def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ConvNextV2Model.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ConvNextV2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device) preprocessor = self.default_image_processor image = prepare_img() inputs = preprocessor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
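For reference, the backbone behaviour asserted in create_and_check_backbone above can be exercised on its own. A minimal sketch, assuming torch and transformers are installed:

# out_features selects which stage feature maps the backbone returns; channels
# reports the channel count of each selected stage (mirrors the test assertions).
import torch
from transformers import ConvNextV2Backbone, ConvNextV2Config

config = ConvNextV2Config(
    hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], out_features=["stage2", "stage3", "stage4"]
)
model = ConvNextV2Backbone(config).eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 32, 32))
print(len(outputs.feature_maps))  # 3, one map per requested stage
print(model.channels)  # [20, 30, 40], i.e. hidden_sizes[1:]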
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the TensorFlow ConvNextV2 model."""
from __future__ import annotations import inspect import unittest from typing import List, Tuple import numpy as np from transformers import ConvNextV2Config from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFConvNextV2ForImageClassification, TFConvNextV2Model if is_vision_available(): from PIL import Image from transformers import ConvNextImageProcessor class TFConvNextV2ModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextV2Config( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFConvNextV2Model(config=config) result = model(pixel_values, training=False) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFConvNextV2ForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFConvNextV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFConvNextV2Model, TFConvNextV2ForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFConvNextV2Model, "image-classification": TFConvNextV2ForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = TFConvNextV2ModelTester(self) self.config_tester = ConfigTester( self, config_class=ConvNextV2Config, 
has_text_modality=False, hidden_size=37, ) @unittest.skip(reason="ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): super().test_keras_fit() @unittest.skip(reason="ConvNext does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. 
Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224") self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFConvNextV2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( ConvNextImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = np.array([0.9996, 0.1966, -0.4386]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
# coding=utf-8
# Copyright 2018 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There is no CpmModel.
from transformers.models.cpm.tokenization_cpm import CpmTokenizer from transformers.testing_utils import custom_tokenizers from ..xlnet.test_modeling_xlnet import XLNetModelTest @custom_tokenizers class CpmTokenizationTest(XLNetModelTest): def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def test_pre_tokenization(self): tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate") text = "Hugging Face大法好,谁用谁知道。" normalized_text = "Hugging Face大法好,谁用谁知道。<unk>" bpe_tokens = "▁Hu gg ing ▁ ▂ ▁F ace ▁大法 ▁好 ▁ , ▁谁 ▁用 ▁谁 ▁知 道 ▁ 。".split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63, 8, 9, 440, 108, 440, 121, 90, 8, 12, 0] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) reconstructed_text = tokenizer.decode(input_bpe_tokens) self.assertEqual(reconstructed_text, normalized_text)
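The pre-tokenization test above exercises a full tokenize -> ids -> decode round trip. The same flow can be run directly; a minimal sketch, assuming network access to the TsinghuaAI/CPM-Generate checkpoint (plus the jieba and sentencepiece dependencies CpmTokenizer needs):

# Round trip mirroring test_pre_tokenization: tokenize, map to ids, decode back.
from transformers.models.cpm.tokenization_cpm import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
text = "Hugging Face大法好,谁用谁知道。"
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
print(tokenizer.decode(ids))  # decoding normalizes the text, as asserted in the test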
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch CpmAnt model."""
import unittest from transformers.testing_utils import is_torch_available, require_torch, tooslow from ...generation.test_utils import torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CpmAntConfig, CpmAntForCausalLM, CpmAntModel, CpmAntTokenizer, ) @require_torch class CpmAntModelTester: def __init__( self, parent, batch_size=2, seq_length=8, is_training=True, use_token_type_ids=False, use_input_mask=False, use_labels=False, use_mc_token_ids=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, num_buckets=32, max_distance=128, prompt_length=8, prompt_types=8, segment_types=8, init_std=1.0, return_dict=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_buckets = num_buckets self.max_distance = max_distance self.prompt_length = prompt_length self.prompt_types = prompt_types self.segment_types = segment_types self.init_std = init_std self.return_dict = return_dict def prepare_config_and_inputs(self): input_ids = {} input_ids["input_ids"] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).type(torch.int32) input_ids["use_cache"] = False config = self.get_config() return (config, input_ids) def get_config(self): return CpmAntConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, dim_ff=self.intermediate_size, position_bias_num_buckets=self.num_buckets, position_bias_max_distance=self.max_distance, prompt_types=self.prompt_types, prompt_length=self.prompt_length, segment_types=self.segment_types, use_cache=True, init_std=self.init_std, return_dict=self.return_dict, ) def create_and_check_cpmant_model(self, config, input_ids, *args): model = CpmAntModel(config=config) model.to(torch_device) model.eval() hidden_states = model(**input_ids).last_hidden_state self.parent.assertEqual(hidden_states.shape, (self.batch_size, self.seq_length, config.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, *args): model = CpmAntForCausalLM(config) model.to(torch_device) input_ids["input_ids"] = input_ids["input_ids"].to(torch_device) model.eval() model_output = model(**input_ids) self.parent.assertEqual( model_output.logits.shape, (self.batch_size, self.seq_length, config.vocab_size + config.prompt_types * config.prompt_length), ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class CpmAntModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CpmAntModel, CpmAntForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": CpmAntModel, "text-generation": CpmAntForCausalLM} if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_mismatched_shapes = False test_head_masking = False test_resize_embeddings = False def 
setUp(self):
        self.model_tester = CpmAntModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CpmAntConfig)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip("CPMAnt doesn't support inputs_embeds.")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(
        "CPMAnt doesn't support retaining gradients on hidden_states or attentions: prompt management peels the"
        " output hidden_states (and attentions) off the graph. We strongly recommend using the loss to tune the"
        " model instead."
    )
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def test_cpmant_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_cpmant_model(config, inputs)

    def test_cpmant_lm_head_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(config, inputs)


@require_torch
class CpmAntModelIntegrationTest(unittest.TestCase):
    @tooslow
    def test_inference_masked_lm(self):
        texts = "今天天气真好!"
        model_path = "openbmb/cpm-ant-10b"
        model = CpmAntModel.from_pretrained(model_path)
        tokenizer = CpmAntTokenizer.from_pretrained(model_path)
        inputs = tokenizer(texts, return_tensors="pt")
        hidden_states = model(**inputs).last_hidden_state

        expected_slice = torch.tensor(
            [[[6.1708, 5.9244, 1.0835], [6.5207, 6.2893, -11.3324], [-1.0107, -0.0576, -5.9577]]],
        )
        self.assertTrue(torch.allclose(hidden_states[:, :3, :3], expected_slice, atol=1e-2))


@require_torch
class CpmAntForCausalLMIntegrationTest(unittest.TestCase):
    @tooslow
    def test_inference_causal(self):
        texts = "今天天气真好!"
        model_path = "openbmb/cpm-ant-10b"
        model = CpmAntForCausalLM.from_pretrained(model_path)
        tokenizer = CpmAntTokenizer.from_pretrained(model_path)
        inputs = tokenizer(texts, return_tensors="pt")
        logits = model(**inputs).logits

        expected_slice = torch.tensor(
            [[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]],
        )
        self.assertTrue(torch.allclose(logits[:, :3, :3], expected_slice, atol=1e-2))

    @tooslow
    def test_simple_generation(self):
        model_path = "openbmb/cpm-ant-10b"
        model = CpmAntForCausalLM.from_pretrained(model_path)
        tokenizer = CpmAntTokenizer.from_pretrained(model_path)
        texts = "今天天气不错,"
        expected_output = "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的"
        model_inputs = tokenizer(texts, return_tensors="pt")
        token_ids = model.generate(**model_inputs)
        # batch_decode returns a list of strings, so compare against the single decoded sequence
        output_texts = tokenizer.batch_decode(token_ids)[0]
        self.assertEqual(expected_output, output_texts)

    @tooslow
    def test_batch_generation(self):
        model_path = "openbmb/cpm-ant-10b"
        model = CpmAntForCausalLM.from_pretrained(model_path)
        tokenizer = CpmAntTokenizer.from_pretrained(model_path)
        texts = ["今天天气不错,", "新年快乐,万事如意!"]
        expected_output = [
            "今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的",
            "新年快乐,万事如意!在这辞旧迎新的美好时刻,我谨代表《农村新技术》杂志社全体同仁,向一直以来关心、支持《农村新技术》杂志发展的各级领导、各界朋友和广大读者致以最诚挚的",
        ]
        model_inputs = tokenizer(texts, return_tensors="pt", padding=True)
        token_ids = model.generate(**model_inputs)
        output_texts = tokenizer.batch_decode(token_ids)
        self.assertEqual(expected_output, output_texts)
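
# --- Illustrative sketch (not part of the original test file) ---
# The LM-head check above expects logits over `vocab_size + prompt_types * prompt_length`
# entries: CPMAnt's output embedding also covers its learned prompt tokens, so the logit
# dimension is larger than the raw vocabulary. The helper name below is hypothetical and
# only makes the arithmetic explicit; with the tester defaults, 99 + 8 * 8 == 163.
def expected_cpmant_logit_dim(vocab_size: int, prompt_types: int, prompt_length: int) -> int:
    return vocab_size + prompt_types * prompt_length


assert expected_cpmant_logit_dim(99, 8, 8) == 163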
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = CpmAntTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab_tokens = [ "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) @tooslow def test_pre_tokenization(self): tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b") texts = "今天天气真好!" jieba_tokens = ["今天", "天气", "真", "好", "!"] tokens = tokenizer.tokenize(texts) self.assertListEqual(tokens, jieba_tokens) normalized_text = "今天天气真好!" input_tokens = [tokenizer.bos_token] + tokens input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens) reconstructed_text = tokenizer.decode(input_jieba_tokens) self.assertEqual(reconstructed_text, normalized_text)
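
# --- Illustrative usage sketch (not part of the original test file) ---
# A minimal sketch of driving CpmAntTokenizer against a hand-built vocab like the one in
# setUp above. Assumptions: the slow tokenizer takes the vocab file path as its first
# positional argument (the usual convention for slow tokenizers), and jieba is installed,
# since CpmAnt pre-tokenizes with jieba before the wordpiece step.
import os
import tempfile

from transformers import CpmAntTokenizer

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.txt")
vocab_tokens = ["<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是"]
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write("".join(token + "\n" for token in vocab_tokens))

tokenizer = CpmAntTokenizer(vocab_file)
# Tokens follow jieba word boundaries; pieces missing from the vocab fall back to "<unk>".
print(tokenizer.tokenize("我是"))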
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class CTRLModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return CTRLConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, dff=self.intermediate_size, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, ) def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLModel(config=config) model.to(torch_device) model.eval() model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) 
model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask} return config, inputs_dict def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args): config.num_labels = self.num_labels model = CTRLForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) @require_torch class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) test_pruning = True test_resize_embeddings = False test_head_masking = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": return True return False def setUp(self): self.model_tester = CTRLModelTester(self) self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_config(self): self.config_tester.run_common_tests() def test_ctrl_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*config_and_inputs) def test_ctrl_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CTRLModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass @require_torch class CTRLModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) @slow def test_lm_generate_ctrl(self): model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl") model.to(torch_device) input_ids = torch.tensor( [[11859, 0, 1611, 8]], 
dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
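
# --- Illustrative sketch (not part of the original test file) ---
# The testers above build fake batches with `ids_tensor` and `random_attention_mask` from
# tests/test_modeling_common.py. A rough, self-contained approximation of what those
# helpers produce (for illustration only, not the library's exact implementation):
import torch


def ids_tensor_sketch(shape, vocab_size):
    # random integer ids in [0, vocab_size), e.g. fake input_ids of shape (batch, seq_len)
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)


def random_attention_mask_sketch(shape):
    # random 0/1 mask, with the last position forced to 1 so every row attends to something
    mask = torch.randint(low=0, high=2, size=tuple(shape), dtype=torch.long)
    mask[:, -1] = 1
    return mask


fake_input_ids = ids_tensor_sketch([14, 7], vocab_size=99)  # CTRLModelTester defaults
fake_mask = random_attention_mask_sketch([14, 7])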
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations import unittest from transformers import CTRLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.ctrl.modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, ) class TFCTRLModelTester(object): def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = CTRLConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, dff=self.intermediate_size, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFCTRLModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, None, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_ctrl_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFCTRLLMHeadModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_ctrl_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels sequence_labels = 
ids_tensor([self.batch_size], self.type_sequence_label_size) inputs = { "input_ids": input_ids, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFCTRLForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFCTRLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFCTRLModel, TFCTRLLMHeadModel, TFCTRLForSequenceClassification) if is_tf_available() else () all_generative_model_classes = (TFCTRLLMHeadModel,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFCTRLModel, "text-classification": TFCTRLForSequenceClassification, "text-generation": TFCTRLLMHeadModel, "zero-shot": TFCTRLForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": return True return False def setUp(self): self.model_tester = TFCTRLModelTester(self) self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_ctrl_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*config_and_inputs) def test_ctrl_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_lm_head(*config_and_inputs) def test_ctrl_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_for_sequence_classification(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() list_lm_models = [TFCTRLLMHeadModel] list_other_models_with_output_ebd = [TFCTRLForSequenceClassification] for model_class in self.all_model_classes: model = model_class(config) model.build() assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in list_lm_models: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) elif model_class in list_other_models_with_output_ebd: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert name is None else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None @slow def test_model_from_pretrained(self): for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCTRLModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFCTRLModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_ctrl(self): model = TFCTRLLMHeadModel.from_pretrained("Salesforce/ctrl") input_ids = tf.convert_to_tensor([[11859, 0, 1611, 8]], dtype=tf.int32) expected_output_ids = [ 11859, 0, 
1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
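
# --- Illustrative sanity check (not part of the original test files) ---
# With do_sample=False, generate() is greedy and therefore deterministic, which is why the
# PyTorch and TensorFlow integration tests above can pin the exact same continuation for
# the prompt "Legal the president is". The lists below are copied verbatim from the tests:
PT_EXPECTED = [11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5]
TF_EXPECTED = [11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5]
assert PT_EXPECTED == TF_EXPECTED  # both backends pin token-for-token identical greedy output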
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Cvt model. """
import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class CvtConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "embed_dim")) self.parent.assertTrue(hasattr(config, "num_heads")) class CvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 32, 48], num_heads=[1, 2, 3], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_sizes = patch_sizes self.patch_stride = patch_stride self.patch_padding = patch_padding self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.num_channels = num_channels self.embed_dim = embed_dim self.num_heads = num_heads self.stride_kv = stride_kv self.depth = depth self.cls_token = cls_token self.attention_drop_rate = attention_drop_rate self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = CvtModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for i in range(len(self.depth)): height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = CvtForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, 
(self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = CvtModelTester(self) self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Cvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Cvt does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depth) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CvtModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class CvtModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = 
CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
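
# --- Worked example (not part of the original test file) ---
# create_and_check_model above tracks how each Cvt stage (a strided convolution) shrinks
# the spatial dimensions: size -> floor((size + 2 * padding - kernel) / stride) + 1.
# With the tester defaults (image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
# patch_padding=[2, 1, 1]) the feature map goes 64 -> 16 -> 8 -> 4, so last_hidden_state
# has shape (batch_size, embed_dim[-1], 4, 4). The helper name below is local to this sketch.
from math import floor


def cvt_stage_sizes(size, patch_sizes, patch_stride, patch_padding):
    sizes = [size]
    for kernel, stride, padding in zip(patch_sizes, patch_stride, patch_padding):
        size = floor((size + 2 * padding - kernel) / stride) + 1
        sizes.append(size)
    return sizes


assert cvt_stage_sizes(64, [7, 3, 3], [4, 2, 2], [2, 1, 1]) == [64, 16, 8, 4]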
""" Testing suite for the TensorFlow Cvt model. """
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFCvtConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "embed_dim")) self.parent.assertTrue(hasattr(config, "num_heads")) class TFCvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 32, 48], num_heads=[1, 2, 3], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_sizes = patch_sizes self.patch_stride = patch_stride self.patch_padding = patch_padding self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.num_channels = num_channels self.embed_dim = embed_dim self.num_heads = num_heads self.stride_kv = stride_kv self.depth = depth self.cls_token = cls_token self.attention_drop_rate = attention_drop_rate self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFCvtModel(config=config) result = model(pixel_values, training=False) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for i in range(len(self.depth)): height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFCvtForImageClassification(config) result = model(pixel_values, labels=labels, training=False) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_onnx = False def setUp(self): self.model_tester = TFCvtModelTester(self) self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Cvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Cvt does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8") def test_keras_fit_mixed_precision(self): policy = tf.keras.mixed_precision.Policy("mixed_float16") tf.keras.mixed_precision.set_global_policy(policy) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32") def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depth) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, 
model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
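
# --- Illustrative sketch (not part of the original test file) ---
# test_keras_fit_mixed_precision above flips the global Keras policy and restores it by
# hand; if the wrapped call raised, later tests would keep running under mixed_float16.
# A hedged variant using try/finally so the default policy is always restored:
import tensorflow as tf


def run_under_mixed_float16(fn):
    tf.keras.mixed_precision.set_global_policy("mixed_float16")
    try:
        return fn()
    finally:
        # always restore float32 so subsequent tests see the default policy
        tf.keras.mixed_precision.set_global_policy("float32")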
# coding=utf-8
# Copyright 2022 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Data2VecAudio model. """
import math
import unittest

import numpy as np
from datasets import load_dataset

from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
from transformers import Data2VecAudioConfig, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_soundfile, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        Data2VecAudioForAudioFrameClassification,
        Data2VecAudioForCTC,
        Data2VecAudioForSequenceClassification,
        Data2VecAudioForXVector,
        Data2VecAudioModel,
        Wav2Vec2Processor,
    )
    from transformers.models.data2vec.modeling_data2vec_audio import _compute_mask_indices


class Data2VecAudioModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        mask_time_prob=0.5,
        mask_time_length=2,
        vocab_size=32,
        num_adapter_layers=1,
        adapter_stride=2,
        tdnn_dim=(32, 32),
        tdnn_kernel=(5, 3),
        tdnn_dilation=(1, 2),
        xvector_output_dim=32,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_adapter_layers = num_adapter_layers
        self.adapter_stride = adapter_stride
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.scope = scope
        self.tdnn_dim = tdnn_dim
        self.tdnn_kernel = tdnn_kernel
        self.tdnn_dilation = tdnn_dilation
        self.xvector_output_dim = xvector_output_dim

        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
        self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        return config, input_values, attention_mask

    def get_config(self):
        return Data2VecAudioConfig(
            hidden_size=self.hidden_size,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            mask_time_prob=self.mask_time_prob,
            mask_time_length=self.mask_time_length,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
            num_adapter_layers=self.num_adapter_layers,
            adapter_stride=self.adapter_stride,
            tdnn_dim=self.tdnn_dim,
            tdnn_kernel=self.tdnn_kernel,
            tdnn_dilation=self.tdnn_dilation,
            xvector_output_dim=self.xvector_output_dim,
        )

    def create_and_check_model(self, config, input_values, attention_mask):
        model = Data2VecAudioModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )

    def create_and_check_model_with_adapter(self, config, input_values, attention_mask):
        config.add_adapter = True
        model = Data2VecAudioModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size)
        )

    def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask):
        config.add_adapter = True
        config.output_hidden_size = 8
        model = Data2VecAudioModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
        )

    def create_and_check_batch_inference(self, config, input_values, *args):
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = Data2VecAudioModel(config=config)
        model.to(torch_device)
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0

        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state

        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state

            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))

    def check_ctc_loss(self, config, input_values, *args):
        model = Data2VecAudioForCTC(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(sum_loss, float))
        self.parent.assertTrue(isinstance(mean_loss, float))

    def check_seq_classifier_loss(self, config, input_values, *args):
        model = Data2VecAudioForSequenceClassification(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        unmasked_loss = model(input_values, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(masked_loss, float))
        self.parent.assertTrue(isinstance(unmasked_loss, float))
        self.parent.assertTrue(masked_loss != unmasked_loss)

    def check_ctc_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Data2VecAudioForCTC(config=config)
        model.to(torch_device)
        model.train()

        # freeze feature encoder
        model.freeze_feature_encoder()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_seq_classifier_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Data2VecAudioForSequenceClassification(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_xvector_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Data2VecAudioForXVector(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_labels_out_of_vocab(self, config, input_values, *args):
        model = Data2VecAudioForCTC(config)
        model.to(torch_device)
        model.train()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)

        with self.parent.assertRaises(ValueError):
            model(input_values, labels=labels)

    def prepare_config_and_inputs_for_common(self):
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class Data2VecAudioModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            Data2VecAudioForCTC,
            Data2VecAudioModel,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForXVector,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "audio-classification": Data2VecAudioForSequenceClassification,
            "automatic-speech-recognition": Data2VecAudioForCTC,
            "feature-extraction": Data2VecAudioModel,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = Data2VecAudioModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Data2VecAudioConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_adapter(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter(*config_and_inputs)

    def test_model_with_adapter_proj_dim(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)

    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)

    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)

    def test_xvector_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_xvector_training(*config_and_inputs)

    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

    # Data2VecAudio has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # `input_ids` is renamed to `input_values`
    def test_forward_signature(self):
        pass

    # Data2VecAudio cannot resize token embeddings since it has no tokens embeddings
    def test_resize_tokens_embeddings(self):
        pass

    # Data2VecAudio has no inputs_embeds and thus the `get_input_embeddings` fn is not implemented
    def test_model_common_attributes(self):
        pass

    @is_pt_flax_cross_test
    # non-robust architecture does not exist in flax
    def test_equivalence_flax_to_pt(self):
        pass

    @is_pt_flax_cross_test
    # non-robust architecture does not exist in flax
    def test_equivalence_pt_to_flax(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        # set layer drop to 0
        model.config.layerdrop = 0.0

        input_values = inputs_dict["input_values"]

        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)

        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels

        outputs = model(**inputs_dict)

        output = outputs[0]

        # encoder-decoder only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]

        hidden_states.retain_grad()
        attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = [
                    "conv.weight",
                    "masked_spec_embed",
                    "codevectors",
                    "quantizer.weight_proj.weight",
                    "project_hid.weight",
                    "project_hid.bias",
                    "project_q.weight",
                    "project_q.bias",
                    "feature_projection.projection.weight",
                    "feature_projection.projection.bias",
                    "objective.weight",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "codevectors") and module.codevectors is not None:
            module.codevectors.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)

    def test_mask_feature_prob_ctc(self):
        model = Data2VecAudioForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-data2vec-seq-class", mask_feature_prob=0.2, mask_feature_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    def test_mask_time_prob_ctc(self):
        model = Data2VecAudioForCTC.from_pretrained(
            "facebook/data2vec-audio-base-960h", mask_time_prob=0.2, mask_time_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 299, 32))

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = Data2VecAudioModel.from_pretrained("facebook/data2vec-audio-base")
        self.assertIsNotNone(model)


@require_torch
class Data2VecAudioUtilsTest(unittest.TestCase):
    def test_compute_mask_indices(self):
        batch_size = 4
        sequence_length = 60
        mask_prob = 0.5
        mask_length = 1

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])

    def test_compute_mask_indices_low_prob(self):
        # with these settings num_masked_spans=0.5, which means probabilistic rounding
        # ensures that in 5 out of 10 method calls num_masked_spans=0, and in
        # the other 5 out of 10 cases num_masked_spans=1
        n_trials = 100
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        count_dimensions_masked = 0
        count_dimensions_not_masked = 0

        for _ in range(n_trials):
            mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
            mask = torch.from_numpy(mask).to(torch_device)

            num_masks = torch.sum(mask).item()

            if num_masks > 0:
                count_dimensions_masked += 1
            else:
                count_dimensions_not_masked += 1

        # as we test for at least 10 masked dimensions and at least
        # 10 non-masked dimensions, this test could fail with probability:
        # P(100 coin flips, at most 9 heads) = 1.66e-18
        self.assertGreater(count_dimensions_masked, int(n_trials * 0.1))
        self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1))

    def test_compute_mask_indices_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`,
        # but have to be smaller or equal
        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

    def test_compute_mask_indices_attn_mask_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        attention_mask[:2, sequence_length // 2 :] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
        )
        mask = torch.from_numpy(mask).to(torch_device)

        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

        self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)

    def test_compute_mask_indices_short_audio(self):
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        # force one example to be heavily padded
        attention_mask[0, 5:] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
        )

        # make sure that non-padded examples cannot be padded
        self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())


@require_torch
@require_soundfile
@slow
class Data2VecAudioModelIntegrationTest(unittest.TestCase):
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(
            lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
        )[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def _load_superb(self, task, num_samples):
        ds = load_dataset("anton-l/superb_dummy", task, split="test")

        return ds[:num_samples]

    def test_inference_ctc_normal(self):
        model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True)
        input_speech = self._load_datasamples(1)

        input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_batched(self):
        model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h").to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True)

        input_speech = self._load_datasamples(4)

        inputs = processor(input_speech, return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
            "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around"
            " him with thousands of spectators were trivialities not worth thinking about",
            "his instant of panic was followed by a small sharp blow high on his chest",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
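The batched CTC test above doubles as the usage recipe for Data2VecAudioForCTC: pad the batch, run one forward pass, take the argmax over the vocabulary per frame, and let the processor collapse repeats and blanks. A minimal standalone sketch of that greedy decoding loop follows; it assumes the "facebook/data2vec-audio-base-960h" repo also hosts the matching Wav2Vec2Processor files (the tests load a separate tiny processor instead).

import torch
from datasets import load_dataset

from transformers import Data2VecAudioForCTC, Wav2Vec2Processor

model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
# Assumption: the checkpoint repo ships the tokenizer/feature-extractor files.
processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")

# One 16 kHz validation clip from the dummy LibriSpeech split used by the tests.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy CTC decoding: most likely token per frame; batch_decode collapses
# repeated tokens and removes the blank/pad token.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))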
# coding=utf-8
# Copyright 2022 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Data2VecText model. """
"""Testing suite for the PyTorch Data2VecText model."""

import unittest

from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
from transformers import Data2VecTextConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        Data2VecTextForCausalLM,
        Data2VecTextForMaskedLM,
        Data2VecTextForMultipleChoice,
        Data2VecTextForQuestionAnswering,
        Data2VecTextForSequenceClassification,
        Data2VecTextForTokenClassification,
        Data2VecTextModel,
    )
    from transformers.models.data2vec.modeling_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
        Data2VecTextForTextEmbeddings,
        create_position_ids_from_input_ids,
    )


class Data2VecTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return Data2VecTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = Data2VecTextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = Data2VecTextModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = Data2VecTextForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = Data2VecTextForCausalLM(config=config).to(torch_device).eval()

        # make sure that ids don't start with pad token
        mask = input_ids.ne(config.pad_token_id).long()
        input_ids = input_ids * mask

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)

        # make sure that ids don't start with pad token
        mask = next_tokens.ne(config.pad_token_id).long()
        next_tokens = next_tokens * mask
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = Data2VecTextForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = Data2VecTextForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = Data2VecTextForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = Data2VecTextForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class Data2VecTextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextModel,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (Data2VecTextForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": Data2VecTextModel,
            "fill-mask": Data2VecTextForMaskedLM,
            "question-answering": Data2VecTextForQuestionAnswering,
            "text-classification": Data2VecTextForSequenceClassification,
            "text-generation": Data2VecTextForCausalLM,
            "token-classification": Data2VecTextForTokenClassification,
            "zero-shot": Data2VecTextForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    model_split_percents = [0.5, 0.9]

    def setUp(self):
        self.model_tester = Data2VecTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Data2VecTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs_relative_pos_emb(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        config_and_inputs[0].position_embedding_type = "relative_key"
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Data2VecTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Ensure that the default position ids only assign a sequential order.

        This is a regression test for https://github.com/huggingface/transformers/issues/1761.
        The position ids should be masked with the embedding object's padding index. Therefore, the first available
        non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1.
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = Data2VecTextForTextEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Ensure that the default position ids only assign a sequential order.

        This is a regression test for https://github.com/huggingface/transformers/issues/1761.
        The position ids should be masked with the embedding object's padding index. Therefore, the first available
        non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1.
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = Data2VecTextForTextEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))


@require_torch
class Data2VecTextModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 11, 50265))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [[[0.2328, 0.0000, 1.1710], [2.2525, 0.0000, 1.9937], [2.1280, 0.0000, 1.8691]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]

        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [[[0.1998, -0.0379, 0.0024], [-0.0971, -0.2214, -0.1798], [-0.0789, -0.2400, -0.1898]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
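# Illustration (a minimal sketch, not the library's implementation): the padding-aware
# position-id convention verified by the two `test_create_position_ids_*` regression
# tests above. `padding_idx=1` is an assumption matching RoBERTa-style checkpoints such
# as Data2VecText, and `sketch_create_position_ids` is a hypothetical helper name.

import torch


def sketch_create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Non-pad tokens get sequential positions starting at padding_idx + 1;
    # pad tokens keep padding_idx itself.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


if __name__ == "__main__":
    ids = torch.tensor([[12, 31, 13, 1]])  # last token is the pad token (id 1)
    print(sketch_create_position_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])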
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
"""Testing suite for the PyTorch Data2VecVision model."""

import unittest

from transformers import Data2VecVisionConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        Data2VecVisionForImageClassification,
        Data2VecVisionForSemanticSegmentation,
        Data2VecVisionModel,
    )
    from transformers.models.data2vec.modeling_data2vec_vision import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class Data2VecVisionModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BEiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return Data2VecVisionConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = Data2VecVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (self.image_size // self.patch_size) ** 2
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = Data2VecVisionForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = Data2VecVisionForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (Data2VecVisionModel, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": Data2VecVisionModel,
            "image-classification": Data2VecVisionForImageClassification,
            "image-segmentation": Data2VecVisionForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Data2VecVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # Data2VecVision does not use inputs_embeds
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Data2VecVision has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in [*get_values(MODEL_MAPPING)]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in [*get_values(MODEL_MAPPING)] or not model_class.supports_gradient_checkpointing:
                continue
            # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
            # this can then be incorporated into _prepare_for_class in test_modeling_common.py
            elif model_class.__name__ == "Data2VecVisionForSemanticSegmentation":
                batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
                inputs_dict["labels"] = torch.zeros(
                    [self.model_tester.batch_size, height, width], device=torch_device
                ).long()
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as semseg models tend to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Data2VecVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class Data2VecVisionModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3277, -0.1395, 0.0911]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]]
        self.assertEqual(logits[0].topk(2).indices.cpu().tolist(), expected_top2)
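# Illustration (a minimal sketch, not part of the test suite above): the shape
# arithmetic these vision tests rely on, worked through with the tester defaults
# (image_size=30, patch_size=2).

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # (30 // 2) ** 2 = 225 patch tokens
seq_length = num_patches + 1  # 226 once the [CLS] token is prepended

# The semantic-segmentation head is asserted to return logits at twice the input
# resolution, i.e. (batch_size, num_labels, 60, 60) for a 30x30 input.
assert (num_patches, seq_length, image_size * 2) == (225, 226, 60)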
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
from __future__ import annotations

import collections.abc
import inspect
import unittest

import numpy as np

from transformers import Data2VecVisionConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFData2VecVisionForImageClassification,
        TFData2VecVisionForSemanticSegmentation,
        TFData2VecVisionModel,
    )
    from transformers.models.data2vec.modeling_tf_data2vec_vision import (
        TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class TFData2VecVisionModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return Data2VecVisionConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = TFData2VecVisionModel(config=config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (
            self.image_size
            if isinstance(self.image_size, collections.abc.Iterable)
            else (self.image_size, self.image_size)
        )
        patch_size = (
            self.patch_size
            if isinstance(self.patch_size, collections.abc.Iterable)
            else (self.patch_size, self.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = TFData2VecVisionForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFData2VecVisionForSemanticSegmentation(config) result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_keras_fit(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, _, _ = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))} return config, inputs_dict @require_tf class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = 
True image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, 
model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True) history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), expected_top2)
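# A small worked example (values taken from the tester defaults above; the
# variable names are illustrative) of the shape bookkeeping that
# test_attention_outputs and test_hidden_states_output rely on: a 30x30 image
# cut into 2x2 patches yields num_patches tokens, plus one [CLS] token, and
# each attention map has shape [num_heads, seq_len, seq_len].
image_size = (30, 30)
patch_size = (2, 2)
num_attention_heads = 4

num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])  # 15 * 15 = 225
seq_len = num_patches + 1  # 226, including the [CLS] token

expected_attention_shape = [num_attention_heads, seq_len, seq_len]
print(expected_attention_shape)  # [4, 226, 226]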
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class DebertaModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DebertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, 
position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def check_loss_output(self, result): self.parent.assertListEqual(list(result.loss.size()), []) def create_and_check_deberta_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaModel(config=config) model.to(torch_device) model.eval() sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0] sequence_output = model(input_ids, token_type_ids=token_type_ids)[0] sequence_output = model(input_ids)[0] self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]) def create_and_check_deberta_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_deberta_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DebertaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels]) self.check_loss_output(result) def create_and_check_deberta_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DebertaForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_deberta_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": 
DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_torchscript = False test_pruning = False test_head_masking = False is_encoder_decoder = False def setUp(self): self.model_tester = DebertaModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_deberta_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DebertaModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch @require_sentencepiece @require_tokenizers class DebertaModelIntegrationTest(unittest.TestCase): @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_no_head(self): model = DebertaModel.from_pretrained("microsoft/deberta-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_slice = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
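# A minimal sketch (plain torch; make_input_ids is an illustrative helper, not
# a library function) of the random-batch pattern DebertaModelTester uses:
# ids_tensor draws integer tensors shaped like a tokenized batch, and the
# attention mask is drawn the same way with vocab_size=2 so it holds 0s and 1s.
import torch

def make_input_ids(batch_size, seq_length, vocab_size):
    return torch.randint(0, vocab_size, (batch_size, seq_length), dtype=torch.long)

input_ids = make_input_ids(13, 7, 99)  # matches the tester defaults above
attention_mask = make_input_ids(13, 7, 2)  # same trick as ids_tensor(..., vocab_size=2)
print(input_ids.shape)  # torch.Size([13, 7])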
from __future__ import annotations import unittest from transformers import DebertaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, ) class TFDebertaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.relative_attention = False self.max_relative_positions = -1 self.position_biased_input = True self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = DebertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, max_relative_positions=self.max_relative_positions, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def 
create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDebertaModel, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDebertaModel, "fill-mask": TFDebertaForMaskedLM, "question-answering": TFDebertaForQuestionAnswering, "text-classification": TFDebertaForSequenceClassification, "token-classification": TFDebertaForTokenClassification, "zero-shot": TFDebertaForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDebertaModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def 
test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base") self.assertIsNotNone(model) @require_tf class TFDeBERTaModelIntegrationTest(unittest.TestCase): @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_no_head(self): model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_slice = tf.constant( [ [ [-0.59855896, -0.80552566, -0.8462135], [1.4484025, -0.93483794, -0.80593085], [0.3122741, 0.00316059, -1.4131377], ] ] ) tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
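# A minimal, self-contained sketch of the slice-comparison pattern the
# integration test above uses. The two tensors reuse the reference values from
# the PyTorch and TensorFlow DeBERTa tests in this file pair purely as sample
# data: assert_near raises unless |output - expected| stays within atol.
import tensorflow as tf

output = tf.constant(
    [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
)
expected_slice = tf.constant(
    [
        [
            [-0.59855896, -0.80552566, -0.8462135],
            [1.4484025, -0.93483794, -0.80593085],
            [0.3122741, 0.00316059, -1.4131377],
        ]
    ]
)
tf.debugging.assert_near(output, expected_slice, atol=1e-4)  # passes: max abs diff ~4e-5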
import unittest from transformers import DebertaV2Config, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaV2ForMaskedLM, DebertaV2ForMultipleChoice, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class DebertaV2ModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DebertaV2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, 
relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, ) def check_loss_output(self, result): self.parent.assertListEqual(list(result.loss.size()), []) def create_and_check_deberta_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaV2Model(config=config) model.to(torch_device) model.eval() sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0] sequence_output = model(input_ids, token_type_ids=token_type_ids)[0] sequence_output = model(input_ids)[0] self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]) def create_and_check_deberta_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaV2ForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_deberta_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DebertaV2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels]) self.check_loss_output(result) def create_and_check_deberta_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DebertaV2ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_deberta_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaV2ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_deberta_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DebertaV2ForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DebertaV2Model, DebertaV2ForMaskedLM, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2ForQuestionAnswering, DebertaV2ForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DebertaV2Model, "fill-mask": DebertaV2ForMaskedLM, "question-answering": DebertaV2ForQuestionAnswering, "text-classification": DebertaV2ForSequenceClassification, "token-classification": DebertaV2ForTokenClassification, "zero-shot": DebertaV2ForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_torchscript = False test_pruning = False test_head_masking = False is_encoder_decoder = False def setUp(self): self.model_tester = DebertaV2ModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_deberta_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DebertaV2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch @require_sentencepiece @require_tokenizers class DebertaV2ModelIntegrationTest(unittest.TestCase): @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_no_head(self): model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_slice = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
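# A shape-only sketch of the multiple-choice expansion used in
# create_and_check_deberta_for_multiple_choice above: each (batch, seq_len)
# tensor gains a choices axis and is repeated along it before being fed to the
# model, so the logits come back as (batch, num_choices).
import torch

batch_size, seq_length, num_choices = 13, 7, 4
input_ids = torch.randint(0, 99, (batch_size, seq_length))

multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(multiple_choice_input_ids.shape)  # torch.Size([13, 4, 7])
# every choice slot holds the same tokens, which is all a shape test needs
assert torch.equal(multiple_choice_input_ids[:, 0], multiple_choice_input_ids[:, 1])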
from __future__ import annotations import unittest from transformers import DebertaV2Config, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForSequenceClassification, TFDebertaV2ForTokenClassification, TFDebertaV2Model, ) class TFDebertaV2ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = DebertaV2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels ): model = TFDebertaV2Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaV2ForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaV2ForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDebertaV2ForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDebertaV2ForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFDebertaV2ForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDebertaV2Model, TFDebertaV2ForMaskedLM, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForMultipleChoice, TFDebertaV2ForSequenceClassification, 
TFDebertaV2ForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDebertaV2Model, "fill-mask": TFDebertaV2ForMaskedLM, "question-answering": TFDebertaV2ForQuestionAnswering, "text-classification": TFDebertaV2ForSequenceClassification, "token-classification": TFDebertaV2ForTokenClassification, "zero-shot": TFDebertaV2ForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDebertaV2ModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge") self.assertIsNotNone(model) @require_tf class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase): @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_no_head(self): model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge") input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_slice = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
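# The TensorFlow counterpart of the torch expansion sketched earlier, matching
# create_and_check_for_multiple_choice above: tf.expand_dims adds the choices
# axis and tf.tile repeats the tokens along it.
import tensorflow as tf

batch_size, seq_length, num_choices = 13, 7, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

multiple_choice_input_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(multiple_choice_input_ids.shape)  # (13, 4, 7)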
# coding=utf-8
# Copyright 2019 Hugging Face inc. team.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
# Tokenization tests for DeBERTa-v2 (slow and fast tokenizers). A SentencePiece
# fixture is used for testing; test_convert_token_and_id exercises
# _convert_token_to_id and _convert_id_to_token.
import unittest from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = DebertaV2Tokenizer rust_tokenizer_class = DebertaV2TokenizerFast test_sentencepiece = True test_sentencepiece_ignore_case = True def setUp(self): super().setUp() tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>") tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_convert_token_and_id(self): token = "<pad>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<pad>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "[PAD]") self.assertEqual(len(vocab_keys), 30_001) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 30_000) def test_do_lower_case(self): sequence = " \tHeLLo!how \n Are yoU? " tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.") def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.") def test_sentencepiece_tokenize_and_decode(self): pass def test_split_by_punct(self): sequence = "I was born in 92000, and this is falsé." tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", split_by_punct=True) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, unk_token="<unk>", split_by_punct=True) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) def test_do_lower_case_split_by_punct(self): sequence = "I was born in 92000, and this is falsé." 
tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=True) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast( SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=True ) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) def test_do_lower_case_split_by_punct_false(self): sequence = "I was born in 92000, and this is falsé." tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=False) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast( SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=True, split_by_punct=False ) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) def test_do_lower_case_false_split_by_punct(self): sequence = "I was born in 92000, and this is falsé." tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=False, split_by_punct=True) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast( SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=False, split_by_punct=True ) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) def test_do_lower_case_false_split_by_punct_false(self): sequence = " \tHeLLo!how \n Are yoU? " tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=False, split_by_punct=False) tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, tokens_target) rust_tokenizer = DebertaV2TokenizerFast( SAMPLE_VOCAB, unk_token="<unk>", do_lower_case=False, split_by_punct=False ) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(rust_tokens, tokens_target) def test_rust_and_python_full_tokenizers(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False)) rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False)) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_full_tokenizer(self): sequence = "This is a test" ids_target = [13, 1, 4398, 25, 21, 1289] tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"] back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>", keep_accents=True) rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, unk_token="<unk>", keep_accents=True) ids = tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, ids_target) tokens = tokenizer.tokenize(sequence) self.assertListEqual(tokens, tokens_target) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual(back_tokens, back_tokens_target) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(rust_ids, ids_target) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(rust_tokens, tokens_target) rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids) self.assertListEqual(rust_back_tokens, back_tokens_target) sequence = "I was born in 92000, and this is falsé." ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] ids = tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, ids_target) tokens = tokenizer.tokenize(sequence) self.assertListEqual(tokens, tokens_target) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual(back_tokens, back_tokens_target) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(rust_ids, ids_target) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(rust_tokens, tokens_target) rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids) self.assertListEqual(rust_back_tokens, back_tokens_target) def test_sequence_builders(self): tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB) text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id], encoded_pair, ) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 
1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
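# Illustrative sketch, not part of the original test suite: the special-token
# layout that test_sequence_builders asserts, shown here with the public
# "microsoft/deberta-v2-xlarge" checkpoint instead of the local fixture.
# DeBERTa-v2 uses the BERT-style pair layout: [CLS] A [SEP] B [SEP].
from transformers import DebertaV2Tokenizer

tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
ids_a = tok.encode("sequence builders", add_special_tokens=False)
ids_b = tok.encode("multi-sequence build", add_special_tokens=False)
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
assert pair == [tok.cls_token_id] + ids_a + [tok.sep_token_id] + ids_b + [tok.sep_token_id]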
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
# Testing suite for the PyTorch DecisionTransformer model. Comments recovered
# from the original source: the expected last_hidden_state length is
# seq_length * 3 because there are three modalities (states, returns and
# actions); test_generate_without_input_ids is disabled because the model does
# not use input_ids, and several ModelTesterMixin tests are disabled because
# the model does not implement those features; the integration test performs
# autoregressive prediction of state, action and return over NUM_STEPS = 2
# timesteps, with TARGET_RETURN defined by the RL environment (it may be
# normalized) and the env.reset() / env.step(action) calls replaced by
# random-tensor stubs.
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class DecisionTransformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.act_dim = act_dim self.state_dim = state_dim self.hidden_size = hidden_size self.max_length = max_length self.is_training = is_training def prepare_config_and_inputs(self): states = floats_tensor((self.batch_size, self.seq_length, self.state_dim)) actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim)) rewards = floats_tensor((self.batch_size, self.seq_length, 1)) returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1)) timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000) attention_mask = random_attention_mask((self.batch_size, self.seq_length)) config = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def get_config(self): return DecisionTransformerConfig( batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, ) def create_and_check_model( self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ): model = DecisionTransformerModel(config=config) model.to(torch_device) model.eval() result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask) self.parent.assertEqual(result.state_preds.shape, states.shape) self.parent.assertEqual(result.action_preds.shape, actions.shape) self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) = config_and_inputs inputs_dict = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DecisionTransformerModel,) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} test_generate_without_input_ids = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False test_hidden_states_output = False test_inputs_embeds = False test_model_common_attributes = False test_gradient_checkpointing = False test_torchscript = False def setUp(self): self.model_tester 
= DecisionTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DecisionTransformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @require_torch class DecisionTransformerModelIntegrationTest(unittest.TestCase): @slow def test_autoregressive_prediction(self): NUM_STEPS = 2 TARGET_RETURN = 10 model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert") model = model.to(torch_device) config = model.config torch.manual_seed(0) state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32) expected_outputs = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device ) returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1) states = state actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32) rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32) timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1) for step in range(NUM_STEPS): actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1) rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1) attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device) with torch.no_grad(): _, action_pred, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) self.assertEqual(action_pred.shape, actions.shape) self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4)) state, reward, _, _ = ( torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {}, ) actions[-1] = action_pred[0, -1] states = torch.cat([states, state], dim=1) pred_return = returns_to_go[0, -1] - reward returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1) timesteps = torch.cat( [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1 )
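# Illustrative sketch, not part of the original test suite: the per-step
# bookkeeping behind test_autoregressive_prediction, run with a randomly
# initialised model so no checkpoint is needed. As in the test, the
# environment interaction is stubbed with random tensors; the shapes and the
# forward-call signature mirror the integration test above.
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6)
model = DecisionTransformerModel(config).eval()

states = torch.randn(1, 1, config.state_dim)         # initial observation
actions = torch.zeros(1, 0, config.act_dim)          # no actions taken yet
rewards = torch.zeros(1, 0)
returns_to_go = torch.tensor(10.0).reshape(1, 1, 1)  # target episode return
timesteps = torch.zeros(1, 1, dtype=torch.long)

for step in range(2):
    # append empty slots for the action/reward the model is about to predict
    actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim)], dim=1)
    rewards = torch.cat([rewards, torch.zeros(1, 1)], dim=1)
    attention_mask = torch.ones(1, states.shape[1], dtype=torch.long)
    with torch.no_grad():
        _, action_pred, _ = model(
            states=states,
            actions=actions,
            rewards=rewards,
            returns_to_go=returns_to_go,
            timesteps=timesteps,
            attention_mask=attention_mask,
            return_dict=False,
        )
    actions[:, -1] = action_pred[:, -1]
    # a real rollout would call env.step() here; placeholders stand in for the
    # next state and a reward of 1.0
    states = torch.cat([states, torch.randn(1, 1, config.state_dim)], dim=1)
    returns_to_go = torch.cat([returns_to_go, (returns_to_go[0, -1] - 1.0).reshape(1, 1, 1)], dim=1)
    timesteps = torch.cat([timesteps, torch.full((1, 1), step + 1, dtype=torch.long)], dim=1)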
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
# Image-processing tests for Deformable DETR. Comments recovered from the
# original source: by setting size["longest_edge"] > max_resolution we're
# effectively not testing this :p; get_expected_values computes the expected
# height and width when providing images to DeformableDetrImageProcessor,
# assuming do_resize is set to True with a scalar size; the two slow COCO
# tests prepare an image and target (plus masks_path for the panoptic case),
# encode them, and verify pixel_values, area, boxes, image_id, is_crowd,
# class_labels, (masks,) orig_size and size.
import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class DeformableDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DeformableDetrImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DeformableDetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) @slow def test_call_pytorch_with_coco_detection_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} image_processing = DeformableDetrImageProcessor() encoding = image_processing(images=image, annotations=target, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") image_processing = DeformableDetrImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 
11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
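# Illustrative sketch, not part of the original test suite: the
# aspect-ratio-preserving resize that get_expected_values models, for
# do_resize=True with a "shortest_edge" size (the "longest_edge" cap is
# ignored here because it does not bind in this example).
def shortest_edge_resize(height, width, shortest_edge):
    """Scale the image so its shorter side equals `shortest_edge`."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# A 480x640 (h x w) COCO image with shortest_edge=800 resizes to 800x1066
# (1066 = int(800 * 640 / 480)), matching the expected pixel_values shape
# asserted in the slow tests above.
assert shortest_edge_resize(480, 640, 800) == (800, 1066)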
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
""" Testing suite for the PyTorch Deformable DETR model. """

import inspect
import math
import unittest
from typing import Dict, List, Tuple

from transformers import DeformableDetrConfig, ResNetConfig, is_torch_available, is_vision_available
from transformers.file_utils import cached_property
from transformers.testing_utils import (
    require_timm,
    require_torch,
    require_torch_accelerator,
    require_vision,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DeformableDetrForObjectDetection, DeformableDetrModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class DeformableDetrModelTester:
    def __init__(
        self,
        parent,
        batch_size=8,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=8,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_queries=12,
        num_channels=3,
        image_size=196,
        n_targets=8,
        num_labels=91,
        num_feature_levels=4,
        encoder_n_points=2,
        decoder_n_points=6,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.image_size = image_size
        self.n_targets = n_targets
        self.num_labels = num_labels
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points

        # we also set the expected seq length for both encoder and decoder
        self.encoder_seq_length = (
            math.ceil(self.image_size / 8) ** 2
            + math.ceil(self.image_size / 16) ** 2
            + math.ceil(self.image_size / 32) ** 2
            + math.ceil(self.image_size / 64) ** 2
        )
        self.decoder_seq_length = self.num_queries

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, pixel_mask, labels

    def get_config(self):
        resnet_config = ResNetConfig(
            num_channels=3,
            embeddings_size=10,
            hidden_sizes=[10, 20, 30, 40],
            depths=[1, 1, 2, 1],
            hidden_act="relu",
            num_labels=3,
            out_features=["stage2", "stage3", "stage4"],
            out_indices=[2, 3, 4],
        )
        return DeformableDetrConfig(
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            num_queries=self.num_queries,
            num_labels=self.num_labels,
            num_feature_levels=self.num_feature_levels,
            encoder_n_points=self.encoder_n_points,
            decoder_n_points=self.decoder_n_points,
            use_timm_backbone=False,
            backbone_config=resnet_config,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels):
        model = DeformableDetrModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size))

    def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        model = DeformableDetrForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))


@require_torch
class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeformableDetrForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    target["masks"] = torch.ones(
                        self.model_tester.n_targets,
                        self.model_tester.image_size,
                        self.model_tester.image_size,
                        device=torch_device,
                        dtype=torch.float,
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = DeformableDetrModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False)

    def test_config(self):
        # we don't test common_properties and arguments_init as these don't apply for Deformable DETR
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()

    def test_deformable_detr_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs)

    def test_deformable_detr_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs)

    @unittest.skip(reason="Deformable DETR does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Deformable DETR is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Deformable DETR does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.encoder_n_points,
                ],
            )
            out_len = len(outputs)

            correct_outlen = 8

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "DeformableDetrForObjectDetection":
                correct_outlen += 2

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.decoder_n_points,
                ],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.encoder_n_points,
                ],
            )

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            print("Model class:", model_class)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_retain_grad_hidden_states_attentions(self):
        # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        # we take the second output since last_hidden_state is the second item
        output = outputs[1]

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()

        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()

        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
is deterministic argnames signature parameters keys if model config isencoderdecoder expectedargnames pixelvalues pixelmask expectedargnames extend headmask decoderheadmask encoderoutputs if headmask and decoderheadmask in argnames else self assertlistequalargnames lenexpectedargnames expectedargnames else expectedargnames pixelvalues pixelmask self assertlistequalargnames 1 expectedargnames def testdifferenttimmbackboneself config inputsdict self modeltester prepareconfigandinputsforcommon let s pick a random timm backbone config backbone tfmobilenetv3small075 for modelclass in self allmodelclasses model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass if modelclass name deformabledetrforobjectdetection expectedshape self modeltester batchsize self modeltester numqueries self modeltester numlabels self assertequaloutputs logits shape expectedshape self asserttrueoutputs def testinitializationself config inputsdict self modeltester prepareconfigandinputsforcommon configsnoinit configzeroinitconfig for modelclass in self allmodelclasses printmodel class modelclass model modelclassconfigconfigsnoinit for name param in model namedparameters if param requiresgrad if param requiresgrad if levelembed in name or samplingoffsets bias in name or valueproj in name or outputproj in name or referencepoints in name continue self assertin param data mean 1e9 round 1e9 item 0 0 1 0 msgfparameter name of model modelclass seems not properly initialized def testtwostagetrainingself modelclass deformabledetrforobjectdetection config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true config twostage true config auxiliaryloss true config withboxrefine true model modelclassconfig model totorchdevice model train inputs self prepareforclassinputsdict modelclass returnlabelstrue loss modelinputs loss loss backward tolerance 1e4 we will verify our results on an image of cute cats def prepareimg image image open testsfixturestestssamplescoco000000039769 png return image requiretimm requirevision slow class deformabledetrmodelintegrationtestsunittest testcase cachedproperty def defaultimageprocessorself return autoimageprocessor frompretrainedsensetimedeformabledetr if isvisionavailable else none def testinferenceobjectdetectionheadself model deformabledetrforobjectdetection frompretrainedsensetimedeformabledetr totorchdevice imageprocessor self defaultimageprocessor image prepareimg encoding imageprocessorimagesimage returntensorspt totorchdevice pixelvalues encodingpixelvalues totorchdevice pixelmask encodingpixelmask totorchdevice with torch nograd outputs modelpixelvalues pixelmask expectedshapelogits torch size1 model config numqueries model config numlabels self assertequaloutputs logits shape expectedshapelogits expectedlogits torch tensor 9 6645 4 3449 5 8705 9 7035 3 8504 5 0724 10 5634 5 3379 7 5116 totorchdevice expectedboxes torch tensor 0 8693 0 2289 0 2492 0 3150 0 5489 0 5845 0 5563 0 7580 0 8518 totorchdevice self asserttruetorch allcloseoutputs logits0 3 3 expectedlogits atol1e4 expectedshapeboxes torch size1 model config numqueries 4 self assertequaloutputs predboxes shape expectedshapeboxes self asserttruetorch allcloseoutputs predboxes0 3 3 expectedboxes atol1e4 verify postprocessing results imageprocessor postprocessobjectdetection outputs threshold0 3 targetsizesimage size 1 0 expectedscores torch tensor0 7999 0 7894 0 6331 0 4720 0 4382 totorchdevice expectedlabels 17 17 75 75 63 
expectedsliceboxes torch tensor16 5028 52 8390 318 2544 470 7841 totorchdevice self assertequallenresultsscores 5 self asserttruetorch allcloseresultsscores expectedscores atol1e4 self assertsequenceequalresultslabels tolist expectedlabels self asserttruetorch allcloseresultsboxes0 expectedsliceboxes def testinferenceobjectdetectionheadwithboxrefinetwostageself model deformabledetrforobjectdetection frompretrained sensetimedeformabledetrwithboxrefinetwostage totorchdevice imageprocessor self defaultimageprocessor image prepareimg encoding imageprocessorimagesimage returntensorspt totorchdevice pixelvalues encodingpixelvalues totorchdevice pixelmask encodingpixelmask totorchdevice with torch nograd outputs modelpixelvalues pixelmask expectedshapelogits torch size1 model config numqueries model config numlabels self assertequaloutputs logits shape expectedshapelogits expectedlogits torch tensor 6 7108 4 3213 6 3777 8 9014 6 1799 6 7240 6 9315 4 4735 6 2298 totorchdevice expectedboxes torch tensor 0 2583 0 5499 0 4683 0 7652 0 9068 0 4882 0 5490 0 2763 0 0564 totorchdevice self asserttruetorch allcloseoutputs logits0 3 3 expectedlogits atol1e4 expectedshapeboxes torch size1 model config numqueries 4 self assertequaloutputs predboxes shape expectedshapeboxes self asserttruetorch allcloseoutputs predboxes0 3 3 expectedboxes atol1e4 requiretorchaccelerator def testinferenceobjectdetectionheadequivalencecpugpuself imageprocessor self defaultimageprocessor image prepareimg encoding imageprocessorimagesimage returntensorspt pixelvalues encodingpixelvalues pixelmask encodingpixelmask 1 run model on cpu model deformabledetrforobjectdetection frompretrainedsensetimedeformabledetrsinglescale with torch nograd cpuoutputs modelpixelvalues pixelmask 2 run model on gpu model totorchdevice with torch nograd gpuoutputs modelpixelvalues totorchdevice pixelmask totorchdevice 3 assert equivalence for key in cpuoutputs keys assert torch allclosecpuoutputskey gpuoutputskey cpu atol1e4 expectedlogits torch tensor 9 9051 4 2541 6 4852 9 6947 4 0854 6 8033 10 0665 5 8470 7 7003 assert torch allclosecpuoutputs logits0 3 3 expectedlogits atol1e4 coding utf 8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch deformable detr model we also set the expected seq length for both encoder and decoder labels is a list of dict each dict being the labels for a given example in the batch special case for head models we don t test common_properties and arguments_init as these don t apply for deformable detr check that output_attentions also work using config loss is at first position loss is added to beginning object detection model returns pred_logits and pred_boxes decoder attentions cross attentions check attention is always last and order is fine removed retain_grad and grad on decoder_hidden_states as queries don t require grad no need to test all models as different heads yield the same functionality we take the second output since last_hidden_state is the second item signature parameters is an ordereddict so arg_names 
order is deterministic let s pick a random timm backbone we will verify our results on an image of cute cats verify postprocessing 1 run model on cpu 2 run model on gpu 3 assert equivalence
import inspect import math import unittest from typing import Dict, List, Tuple from transformers import DeformableDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.file_utils import cached_property from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DeformableDetrForObjectDetection, DeformableDetrModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DeformableDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, image_size=196, n_targets=8, num_labels=91, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.encoder_seq_length = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return DeformableDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, 
num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeformableDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DeformableDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) def test_deformable_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Deformable DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 8 if "labels" in inputs_dict: correct_outlen += 1 if model_class.__name__ == "DeformableDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ 
self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = 
self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        # we take the second output since last_hidden_state is the second item
        output = outputs[1]
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()
        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()
        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            if model.config.is_encoder_decoder:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "encoder_outputs"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_different_timm_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # let's pick a random timm backbone
        config.backbone = "tf_mobilenetv3_small_075"
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if model_class.__name__ == "DeformableDetrForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
            self.assertTrue(outputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if (
                        "level_embed" in name
                        or "sampling_offsets.bias" in name
                        or "value_proj" in name
                        or "output_proj" in name
                        or "reference_points" in name
                    ):
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_two_stage_training(self):
        model_class = DeformableDetrForObjectDetection
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        config.two_stage = True
        config.auxiliary_loss = True
        config.with_box_refine = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
        loss = model(**inputs).loss
        loss.backward()


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_timm
@require_vision
@slow
class DeformableDetrModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None

    def test_inference_object_detection_head(self):
        model = 
DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] ).to(torch_device) expected_boxes = torch.tensor( [[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7999, 0.7894, 0.6331, 0.4720, 0.4382]).to(torch_device) expected_labels = [17, 17, 75, 75, 63] expected_slice_boxes = torch.tensor([16.5028, 52.8390, 318.2544, 470.7841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_object_detection_head_with_box_refine_two_stage(self): model = DeformableDetrForObjectDetection.from_pretrained( "SenseTime/deformable-detr-with-box-refine-two-stage" ).to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] ).to(torch_device) expected_boxes = torch.tensor( [[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) @require_torch_accelerator def test_inference_object_detection_head_equivalence_cpu_gpu(self): image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] pixel_mask = encoding["pixel_mask"] model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") with torch.no_grad(): cpu_outputs = model(pixel_values, pixel_mask) model.to(torch_device) with torch.no_grad(): gpu_outputs = 
model(pixel_values.to(torch_device), pixel_mask.to(torch_device)) for key in cpu_outputs.keys(): assert torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-4) expected_logits = torch.tensor( [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] ) assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
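As a companion to the integration tests above, the same inference and post-processing flow can be run as a standalone script. This is a minimal sketch, not part of the test suite: it assumes network access to the "SenseTime/deformable-detr" checkpoint used in the tests, and the image path "cats.png" is a placeholder.

import torch
from PIL import Image

from transformers import AutoImageProcessor, DeformableDetrForObjectDetection

# Checkpoint name taken from the tests above; "cats.png" is a placeholder path.
processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")
model.eval()

image = Image.open("cats.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Turn raw logits/boxes into thresholded detections, as the integration test does.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")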
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import DeiTImageProcessor class DeiTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 20, "width": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DeiTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DeiTImageProcessor if is_vision_available() else None test_cast_dtype = True def setUp(self): self.image_processor_tester = DeiTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
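To make the resize-then-center-crop pipeline verified above concrete, here is a minimal sketch using the same preprocessing settings as the tester; the random NumPy array is a stand-in for a real image.

import numpy as np

from transformers import DeiTImageProcessor

# Same settings as the tester above; the random array stands in for a real image.
processor = DeiTImageProcessor(
    size={"height": 20, "width": 20},
    crop_size={"height": 18, "width": 18},
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")

# Resized to `size`, then center-cropped to `crop_size` -> (batch, channels, 18, 18)
assert batch["pixel_values"].shape == (1, 3, 18, 18)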
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch DeiT model. """
import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class DeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = DeiTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = DeiTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, 
self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = DeiTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = DeiTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = DeiTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if ( model_class in 
get_values(MODEL_MAPPING) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DeiTModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class DeiTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = 
DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference works in half precision without any problem. """ model = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) with torch.no_grad(): _ = model(pixel_values)
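For reference, a minimal standalone sketch of the inference flow the integration test above exercises (checkpoint name and fixture path taken from the tests; the id2label lookup at the end is illustrative, not part of the test):

import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
model.eval()
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): averaged cls + distillation heads
print(model.config.id2label[int(logits.argmax(-1))])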
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow DeiT model. """
# In DeiT, the sequence length equals the number of patches + 2: we add 2 for the
# [CLS] and distillation tokens. Greyscale images are also tested, and some tests from
# test_modeling_tf_common.py are overwritten, as DeiT does not use input_ids,
# inputs_embeds or attention_mask.
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class TFDeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = TFDeiTModel(config=config) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = TFDeiTForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = TFDeiTForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) 
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFDeiTForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = TFDeiTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def test_model_from_pretrained(self): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFDeiTModel.from_pretrained(model_name) 
self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class DeiTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
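The same flow in TensorFlow, as a minimal sketch mirroring the integration test above (checkpoint name and fixture path taken from the tests; the argmax lookup is illustrative):

import tensorflow as tf
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])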
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# By setting size["longest_edge"] = max_resolution we're effectively not testing the
# longest-edge cap :p. `get_expected_values` computes the expected height and width
# when providing images to DetaImageProcessor, assuming do_resize is set to True with
# a scalar size. The slow tests prepare a COCO image and target (plus masks_path for
# the panoptic case), encode them, and verify pixel values, area, boxes, image_id,
# iscrowd, class_labels, masks, orig_size and size.
import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class DetaImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DetaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DetaImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DetaImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) 
self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) @slow def test_call_pytorch_with_coco_detection_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} image_processing = DetaImageProcessor() encoding = image_processing(images=image, annotations=target, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") image_processing = DetaImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) 
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
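A minimal sketch of the COCO-detection preprocessing flow these tests verify (fixture paths taken from the tests above; the trailing comment describes the encoding, not an assertion):

import json
from PIL import Image
from transformers import DetaImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

processor = DetaImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
# encoding["pixel_values"] holds the resized, rescaled and normalized image batch;
# encoding["labels"][0] holds the normalized boxes, class_labels, area, iscrowd, etc.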
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# By setting size["longest_edge"] = max_resolution we're effectively not testing the
# longest-edge cap :p. `get_expected_values` computes the expected height and width
# when providing images to DetrImageProcessor, assuming do_resize is set to True with
# a scalar size. The slow tests prepare a COCO image and target (plus masks_path for
# the panoptic case), encode them, and verify pixel values, area, boxes, image_id,
# iscrowd, class_labels, masks, orig_size and size.
import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class DetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ): size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DetrImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DetrImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) 
self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_pad")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) @slow def test_call_pytorch_with_coco_detection_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) expected_area = 
torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
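The tester's get_expected_values above encodes the aspect-preserving resize rule for a {"shortest_edge": ..., "longest_edge": ...} size; a small worked sketch of that rule (the helper name is illustrative, and the longest-edge cap is ignored here, as in the tests):

def expected_resized_size(height, width, shortest_edge=18):
    # Scale the image so its shorter side equals `shortest_edge`,
    # keeping the aspect ratio fixed.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert expected_resized_size(480, 640) == (18, 24)  # landscape: height is the short side
assert expected_resized_size(640, 480) == (24, 18)  # portrait: width is the short side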
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch DETR model. """
# The tester sets the expected sequence length for both encoder and decoder:
# encoder = ceil(min_size / 32) * ceil(max_size / 32), decoder = num_queries.
# `labels` is a list of dicts, each dict holding the labels for one example in the
# batch; the head models (DetrForObjectDetection / DetrForSegmentation) are a special
# case when preparing labels for the training tests.
import inspect import math import unittest from transformers import DetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DetrForObjectDetection, DetrForSegmentation, DetrModel if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class DetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return DetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_detr_model(self, config, pixel_values, pixel_mask, 
labels): model = DetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DetrModel, DetrForObjectDetection, DetrForSegmentation, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DetrModel, "image-segmentation": DetrForSegmentation, "object-detection": DetrForObjectDetection, } if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["DetrForObjectDetection", "DetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_model(*config_and_inputs) def test_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def 
test_model_outputs_equivalence(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if model_class.__name__ == "DetrForObjectDetection": correct_outlen += 2 if model_class.__name__ == "DetrForSegmentation": correct_outlen += 3 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = 
self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" in arg_names and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["pixel_values"] = floats_tensor( [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] ) config.num_channels = 1 config.backbone_config.num_channels = 1 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DetrModelIntegrationTestsTimmBackbone(unittest.TestCase): @cached_property def default_image_processor(self): return DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") if
is_vision_available() else None def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9982, 0.9960, 0.9955, 0.9988, 0.9987]).to(torch_device) expected_labels = [75, 75, 63, 17, 17] expected_slice_boxes = torch.tensor([40.1633, 70.8115, 175.5471, 117.9841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_panoptic_segmentation_head(self): model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = 
torch.tensor( [[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267)) self.assertEqual(outputs.pred_masks.shape, expected_shape_masks) expected_slice_masks = torch.tensor( [[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3)) results = image_processor.post_process_panoptic_segmentation( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_shape = torch.Size([480, 640]) expected_slice_segmentation = torch.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype=torch.int32).to( torch_device ) expected_number_of_segments = 5 expected_first_segment = {"id": 1, "label_id": 17, "was_fused": False, "score": 0.994096} number_of_unique_segments = len(torch.unique(results["segmentation"])) self.assertEqual( number_of_unique_segments, expected_number_of_segments + 1 ) self.assertEqual(results["segmentation"].shape, expected_shape) self.assertTrue(torch.allclose(results["segmentation"][:3, :3], expected_slice_segmentation, atol=1e-4)) self.assertEqual(len(results["segments_info"]), expected_number_of_segments) self.assertDictEqual(results["segments_info"][0], expected_first_segment) @require_vision @require_torch @slow class DetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return ( DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm") if is_vision_available() else None ) def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50", revision="no_timm").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
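For reference, a minimal sketch of the inference path exercised by the integration tests above (an illustrative usage snippet, not part of the suite; checkpoint, fixture image, and threshold mirror the tests):

import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Convert raw logits/boxes into thresholded detections (scores, labels, and
# boxes in absolute pixel coordinates of the original image).
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]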
Copyright 2022 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0.
Testing suite for the PyTorch Dinat model. Notes recovered from the original inline comments: greyscale images are tested; the backbone checks verify the hidden states and channels, and also that the backbone works with out_features=None (verifying feature maps and channels again); Dinat has a different seq_length from other vision models; output_hidden_states is also checked via the config; the integration test runs a forward pass and verifies the logits.
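A worked example of the shape arithmetic asserted by create_and_check_model below (illustrative only; values are the DinatModelTester defaults):

image_size, patch_size = 64, 4
depths, embed_dim = [1, 2, 1], 16
# Patch embedding gives a 16x16 grid; each subsequent stage halves the
# spatial resolution and doubles the embedding dimension.
expected_height = (image_size // patch_size) // (2 ** (len(depths) - 1))  # 16 // 4 = 4
expected_dim = embed_dim * 2 ** (len(depths) - 1)                         # 16 * 4 = 64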
import collections import unittest from transformers import DinatConfig from transformers.testing_utils import require_natten, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DinatBackbone, DinatForImageClassification, DinatModel from transformers.models.dinat.modeling_dinat import DINAT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DinatModelTester: def __init__( self, parent, batch_size=13, image_size=64, patch_size=4, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 4, 8], kernel_size=3, dilations=[[3], [1, 2], [1]], mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, num_labels=10, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.kernel_size = kernel_size self.dilations = dilations self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.num_labels = num_labels self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DinatConfig( num_labels=self.num_labels, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, kernel_size=self.kernel_size, dilations=self.dilations, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = DinatModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = (config.image_size // config.patch_size) // (2 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, 
expected_height, expected_width, expected_dim) ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = DinatForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) config.num_channels = 1 model = DinatForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = DinatBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16]) self.parent.assertEqual(len(model.channels), len(config.out_features)) config.out_features = None model = DinatBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4]) self.parent.assertEqual(len(model.channels), 1) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_natten @require_torch class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DinatModel, DinatForImageClassification, DinatBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": DinatModel, "image-classification": DinatForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_torchscript = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DinatModelTester(self) self.config_tester = ConfigTester(self, config_class=DinatConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @unittest.skip(reason="Dinat does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Dinat does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): self.skipTest("Dinat's attention operation is handled entirely by NATTEN.") def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) height = image_size[0] // patch_size[0] width = image_size[1] // patch_size[1] self.assertListEqual( list(hidden_states[0].shape[-3:]), [height, width, self.model_tester.embed_dim], ) if model_class.__name__ != "DinatBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height, width).permute(0, 2, 3, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-3:]), [height, width, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) @slow def test_model_from_pretrained(self): for model_name in DINAT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DinatModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_natten @require_vision @require_torch class DinatModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = DinatForImageClassification.from_pretrained("shi-labs/dinat-mini-in1k-224").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = 
torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.1545, -0.7667, 0.4642]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @require_torch @require_natten class DinatBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (DinatBackbone,) if is_torch_available() else () config_class = DinatConfig def setUp(self): self.model_tester = DinatModelTester(self)
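For reference, a minimal usage sketch matching the image-classification integration test above (illustrative only; same checkpoint and fixture image as the test):

import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatForImageClassification

processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatForImageClassification.from_pretrained("shi-labs/dinat-mini-in1k-224").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) for the ImageNet-1k head
predicted_class = logits.argmax(-1).item()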
Copyright 2023 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0.
Testing suite for the PyTorch DINOv2 model. Notes recovered from the original inline comments: in DINOv2 the sequence length equals the number of patches + 1 (we add 1 for the [CLS] token); the backbone checks verify the hidden states and channels, that the backbone works with out_features=None (verifying feature maps and channels), and that it works with apply_layernorm=False and reshape_hidden_states=False; greyscale images are tested; some tests of test_modeling_common.py are overwritten here because DINOv2 does not use input_ids, inputs_embeds, attention_mask, or seq_length; results are verified on an image of cute cats with a forward pass followed by a check of the last hidden states.
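A worked example of the sequence-length rule above (illustrative only; the first values are the Dinov2ModelTester defaults, and the last line assumes the integration test's input is processed to 224x224 with patch size 14):

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 225
seq_length = num_patches + 1                   # 226; +1 for the [CLS] token
assert (224 // 14) ** 2 + 1 == 257             # consistent with the (1, 257, 768) test shape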
import unittest from transformers import Dinov2Config from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model from transformers.models.dinov2.modeling_dinov2 import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class Dinov2ModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = Dinov2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_backbone(self, config, pixel_values, labels): model = Dinov2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) expected_size = self.image_size // config.patch_size self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) self.parent.assertEqual(len(model.channels), len(config.out_features)) config.out_features = None model = Dinov2Backbone(config=config) model.to(torch_device) 
model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) self.parent.assertEqual(len(model.channels), 1) config.apply_layernorm = False config.reshape_hidden_states = False model = Dinov2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.seq_length, self.hidden_size] ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = Dinov2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = Dinov2ForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Dinov2Model, Dinov2ForImageClassification, Dinov2Backbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Dinov2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Dinov2Config, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Dinov2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="Dinov2 does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Dinov2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class Dinov2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/dinov2-base") if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = Dinov2Model.from_pretrained("facebook/dinov2-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the last hidden states
        expected_shape = torch.Size((1, 257, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-2.1747, -0.4729, 1.0936], [-3.2780, -0.8269, -0.9210], [-2.9129, 1.1284, -0.7306]],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))


@require_torch
class Dinov2BackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (Dinov2Backbone,) if is_torch_available() else ()
    config_class = Dinov2Config

    has_attentions = False

    def setUp(self):
        self.model_tester = Dinov2ModelTester(self)
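# --- Illustration (not part of the test suite above) ---
# A minimal sketch of how the Dinov2Backbone API exercised by create_and_check_backbone
# can be used on its own. The checkpoint name and input size are assumptions for the
# sake of the example; `feature_maps` and `channels` are the attributes the tests verify.
import torch
from transformers import Dinov2Backbone

backbone = Dinov2Backbone.from_pretrained("facebook/dinov2-base")  # assumed checkpoint
pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch of one RGB image
with torch.no_grad():
    outputs = backbone(pixel_values)
# one (batch, channels, height, width) map per entry in config.out_features
for feature_map, num_channels in zip(outputs.feature_maps, backbone.channels):
    print(feature_map.shape, num_channels)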
# coding=utf-8
# Copyright 2020 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest

import pytest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # multiple-choice models expect (batch_size, num_choices, seq_length) inputs
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_accelerator
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments, so skip it;
            # `continue` (rather than `return`) keeps the remaining model classes tested
            if model_class == DistilBertForMultipleChoice:
                continue

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))

    # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference(self):
        import torch

        for model_class in self.all_model_classes:
            dummy_input = torch.LongTensor(
                [
                    [1, 2, 3, 4],
                    [1, 2, 8, 9],
                    [1, 2, 11, 12],
                    [1, 2, 13, 14],
                ]
            ).to(torch_device)
            dummy_attention_mask = torch.LongTensor(
                [
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ]
            ).to(torch_device)

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False
                )
                model.to(torch_device)

                logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
                logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]

                self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2))

                output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits_fa = output_fa.hidden_states[-1]

                output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits = output.hidden_states[-1]

                self.assertTrue(torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2))

    # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_padding_right(self):
        import torch

        for model_class in self.all_model_classes:
            dummy_input = torch.LongTensor(
                [
                    [1, 2, 3, 4],
                    [1, 2, 8, 9],
                    [1, 2, 11, 12],
                    [1, 2, 13, 14],
                ]
            ).to(torch_device)
            dummy_attention_mask = torch.LongTensor(
                [
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ]
            ).to(torch_device)

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False
                )
                model.to(torch_device)

                logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
                logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]

                self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2))

                output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits_fa = output_fa.hidden_states[-1]

                output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits = output.hidden_states[-1]

                self.assertTrue(torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
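# --- Illustration (not part of the test suite above) ---
# A minimal sketch of the Flash Attention 2 loading pattern that the
# test_flash_attn_2_inference* tests exercise, assuming the "distilbert-base-uncased"
# checkpoint and a CUDA device (neither is prescribed by the tests themselves).
import torch
from transformers import DistilBertModel

model = DistilBertModel.from_pretrained(
    "distilbert-base-uncased",  # assumed checkpoint
    torch_dtype=torch.bfloat16,  # FA2 kernels require fp16/bf16
    use_flash_attention_2=True,
).to("cuda")

input_ids = torch.LongTensor([[1, 2, 3, 4]]).to("cuda")
with torch.no_grad():
    hidden = model(input_ids, output_hidden_states=True).hidden_states[-1]
print(hidden.shape)  # (1, 4, 768) for the base checkpoint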
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
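# --- Illustration (not part of the test suite above) ---
# The PyTorch and Flax integration tests check the same inputs against the same
# expected_slice, so the two backends can be cross-checked directly. A minimal
# sketch, assuming both backends are installed; the magnitude of the difference
# printed below is expected to be small (the tests use atol=1e-4), though the
# exact value is not guaranteed by the tests themselves.
import numpy as np
import torch
from transformers import DistilBertModel, FlaxDistilBertModel

input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

pt_model = DistilBertModel.from_pretrained("distilbert-base-uncased")
fx_model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

with torch.no_grad():
    pt_out = pt_model(
        torch.from_numpy(input_ids), attention_mask=torch.from_numpy(attention_mask)
    )[0].numpy()
fx_out = np.asarray(fx_model(input_ids, attention_mask=attention_mask)[0])

print(np.max(np.abs(pt_out - fx_out)))  # maximum elementwise deviation between backends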