Description
Testing suite for the PyTorch SeamlessM4T model. A shared SeamlessM4TModelTester builds a tiny SeamlessM4TConfig and prepares either text or speech inputs; on top of it, SeamlessM4TModelWithSpeechInputTest and SeamlessM4TModelWithTextInputTest run the common ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin checks (forward output shapes, decoder past-key-values caching, weight initialization, attention-output shapes with sub-sampled speech lengths), skipping the tests that do not apply to this architecture (inputs_embeds, forward signature, gradient checkpointing). SeamlessM4TGenerationTest then covers non-standard generation: the composite SeamlessM4TModel is saved once, each task-specific class (SeamlessM4TForTextToText, SeamlessM4TForTextToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForSpeechToSpeech) is reloaded from that same checkpoint, and its seeded generations must match those of the composite model.
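The core of that check is a simple seeded-comparison pattern. A minimal sketch of it is shown below; this is not the exact helper used in the file, and it assumes both generate() calls return dict-like ModelOutputs (e.g. when called with return_intermediate_token_ids=True):

import torch
from transformers import set_seed


def assert_same_generation(model_a, model_b, inputs, kwargs_a, kwargs_b, tol=1e-3):
    # Two models loaded from the same checkpoint must agree on seeded generation.
    set_seed(0)
    out_a = model_a.generate(**inputs, **kwargs_a)
    set_seed(0)
    out_b = model_b.generate(**inputs, **kwargs_b)
    for key in out_a:  # iterating a ModelOutput yields its keys
        if isinstance(out_a[key], torch.Tensor):
            torch.testing.assert_close(
                out_a[key].squeeze().float(), out_b[key].squeeze().float(), atol=tol, rtol=0
            )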
Finally, SeamlessM4TModelIntegrationTest runs slow tests against the facebook/hf-seamless-m4t-medium checkpoint: a fixed French sentence ("C'est un test") and a random 16 kHz audio clip are translated to English, Swahili and Russian, and the generated text tokens, unit tokens, and a slice of the output waveform are compared against recorded expected values. Additional slow tests check that SeamlessM4TModel and each task-specific model produce matching outputs when given the same task.
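The end-user flow those integration tests exercise looks roughly like the following sketch, based on the public SeamlessM4TProcessor / SeamlessM4TModel API; the output fields are the ones the tests read (sequences, unit_sequences, waveform):

import torch
from transformers import SeamlessM4TModel, SeamlessM4TProcessor, set_seed

repo_id = "facebook/hf-seamless-m4t-medium"
processor = SeamlessM4TProcessor.from_pretrained(repo_id)
model = SeamlessM4TModel.from_pretrained(repo_id)

# text -> speech, keeping the intermediate text and unit tokens the tests inspect
text_inputs = processor(text="C'est un test", src_lang="fra", return_tensors="pt")
set_seed(0)  # the tests seed generation for reproducibility
output = model.generate(**text_inputs, tgt_lang="eng", num_beams=1, return_intermediate_token_ids=True)
text_tokens = output.sequences.squeeze().tolist()       # translated text token ids
unit_tokens = output.unit_sequences.squeeze().tolist()  # text-to-unit token ids
waveform = output.waveform.squeeze()                    # synthesized audio

# speech -> text: feed audio through the processor and skip speech synthesis
audio_inputs = processor(audios=torch.rand(20000).numpy(), sampling_rate=16000, return_tensors="pt")
text_ids = model.generate(**audio_inputs, tgt_lang="eng", generate_speech=False)[0]  # [0] holds the text token ids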
Code

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SeamlessM4T model. """
import copy import tempfile import unittest from transformers import SeamlessM4TConfig, is_speech_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4TModel, ) from transformers.models.seamless_m4t.modeling_seamless_m4t import ( SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_speech_available(): from transformers import SeamlessM4TProcessor class SeamlessM4TModelTester: def __init__( self, parent, input_modality="speech", batch_size=2, seq_length=4, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_new_tokens=None, num_labels=3, num_choices=4, scope=None, vocab_size=20, t2u_vocab_size=20, hidden_size=6, num_hidden_layers=2, intermediate_size=6, max_position_embeddings=256, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=6, decoder_ffn_dim=6, t2u_encoder_layers=2, t2u_decoder_layers=2, t2u_encoder_ffn_dim=6, t2u_decoder_ffn_dim=6, num_heads=2, vocoder_num_spkrs=5, vocoder_num_langs=5, upsample_initial_channel=32, unit_embed_dim=25, spkr_embed_dim=6, lang_embed_dim=6, num_conv_pos_embeddings=8, unit_hifi_gan_vocab_size=20, t2u_num_langs=0, t2u_max_new_tokens=25, t2u_offset_tgt_lang=0, vocoder_offset=0, ): self.parent = parent self.input_modality = input_modality self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.vocab_size = vocab_size self.t2u_vocab_size = t2u_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.t2u_encoder_layers = t2u_encoder_layers self.t2u_decoder_layers = t2u_decoder_layers self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim self.num_heads = num_heads self.num_attention_heads = num_heads self.vocoder_num_spkrs = vocoder_num_spkrs self.vocoder_num_langs = vocoder_num_langs self.upsample_initial_channel = upsample_initial_channel self.unit_embed_dim = unit_embed_dim self.spkr_embed_dim = spkr_embed_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.lang_embed_dim = lang_embed_dim self.max_new_tokens = max_new_tokens self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size self.t2u_num_langs = t2u_num_langs self.t2u_max_new_tokens = t2u_max_new_tokens self.t2u_offset_tgt_lang = t2u_offset_tgt_lang 
self.vocoder_offset = vocoder_offset def prepare_config_and_inputs(self): if self.input_modality == "text": inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) else: inputs = ids_tensor([self.batch_size, self.seq_length, 160], self.vocab_size - 1).float() input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) lm_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, inputs, decoder_input_ids, input_mask, lm_labels def get_config(self): return SeamlessM4TConfig( hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, vocab_size=self.vocab_size, t2u_vocab_size=self.t2u_vocab_size, hidden_size=self.hidden_size, speech_encoder_layers=self.num_heads, speech_encoder_intermediate_size=self.intermediate_size, max_position_embeddings=self.max_position_embeddings, encoder_layers=self.encoder_layers, decoder_layers=self.decoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, decoder_ffn_dim=self.decoder_ffn_dim, t2u_encoder_layers=self.t2u_encoder_layers, t2u_decoder_layers=self.t2u_decoder_layers, t2u_encoder_ffn_dim=self.t2u_encoder_ffn_dim, t2u_decoder_ffn_dim=self.t2u_decoder_ffn_dim, num_attention_heads=self.num_heads, encoder_attention_heads=self.num_heads, decoder_attention_heads=self.num_heads, t2u_encoder_attention_heads=self.num_heads, t2u_decoder_attention_heads=self.num_heads, speech_encoder_attention_heads=self.num_heads, unit_hifigan_vocab_vise=self.t2u_vocab_size, vocoder_num_spkrs=self.vocoder_num_spkrs, vocoder_num_langs=self.vocoder_num_langs, upsample_initial_channel=self.upsample_initial_channel, unit_embed_dim=self.unit_embed_dim, spkr_embed_dim=self.spkr_embed_dim, num_conv_pos_embeddings=self.num_conv_pos_embeddings, lang_embed_dim=self.lang_embed_dim, max_new_tokens=self.max_new_tokens, unit_hifi_gan_vocab_size=self.unit_hifi_gan_vocab_size, t2u_num_langs=self.t2u_num_langs, t2u_max_new_tokens=self.t2u_max_new_tokens, t2u_offset_tgt_lang=self.t2u_offset_tgt_lang, vocoder_offset=self.vocoder_offset, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model(self, config, input_ids, decoder_input_ids, input_mask, labels): model = SeamlessM4TModel(config=config) model.to(torch_device) model.eval() if self.input_modality == "text": result = model(input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) else: result = model(input_features=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_features=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) decoder_output = 
result.logits decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state if self.input_modality == "text": seq_length = self.seq_length else: seq_length = model._compute_sub_sample_lengths_from_attention_mask(input_mask).max().item() self.parent.assertEqual(encoder_output.size(), (self.batch_size, seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, decoder_input_ids.shape[1], self.vocab_size)) self.parent.assertEqual(len(decoder_past), config.decoder_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True model = SeamlessM4TModel(config=config) model.to(torch_device) model.eval() decoder_input_ids = torch.clamp(decoder_input_ids, config.pad_token_id + 1) outputs = model( input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=input_mask, use_cache=True ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( input_ids, decoder_input_ids=next_input_ids, decoder_attention_mask=next_attention_mask, output_hidden_states=True, ) output_from_no_past = output_from_no_past["decoder_hidden_states"][0] output_from_past = model( input_ids, decoder_input_ids=next_tokens, decoder_attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["decoder_hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = config_and_inputs input_name = "input_ids" if self.input_modality == "text" else "input_features" inputs_dict = { input_name: input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "labels": lm_labels, } return config, inputs_dict @require_torch class SeamlessM4TModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = False test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4TForSpeechToText,) if is_torch_available() else () input_name = "input_features" def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def 
test_model_from_pretrained(self): for model_name in SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SeamlessM4TModel.from_pretrained(model_name) self.assertIsNotNone(model) def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:batch_size, :sequence_length] max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] attention_mask = torch.ones(input_ids.shape[:2], dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids.float(), attention_mask, max_length @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = ( torch.zeros(input_ids.shape[:2], dtype=torch.int64, layout=input_ids.layout, device=input_ids.device) + model._get_decoder_start_token_id() ) attention_mask = None return encoder_outputs, input_ids, attention_mask def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="SeamlessM4TSpeechEncoder doesn't have an embedding layer") def test_inputs_embeds(self): pass @unittest.skip( reason="Expected missing keys serve when using SeamlessM4TForXXX.from_pretrained from a checkpoint saved by SeamlessM4TModel.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip( reason="SeamlessM4TModel is base class but has actually a bigger architecture than seamlessM4T task-specific models." 
) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4TModel can takes input_ids or input_features") def test_forward_signature(self): pass @unittest.skip(reason="SeamlessM4T has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) sub_sampled_length = ( model._compute_sub_sample_lengths_from_attention_mask(inputs_dict["attention_mask"]).max().item() ) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, sub_sampled_length, ], ) 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @require_torch class SeamlessM4TModelWithTextInputTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = True test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4TModel, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4TForTextToText,) if is_torch_available() else () pipeline_model_mapping = ( { "automatic-speech-recognition": SeamlessM4TForSpeechToText, "conversational": SeamlessM4TForTextToText, "feature-extraction": SeamlessM4TModel, "summarization": SeamlessM4TForTextToText, "text-to-audio": SeamlessM4TForTextToSpeech, "text2text-generation": SeamlessM4TForTextToText, "translation": SeamlessM4TForTextToText, } if is_torch_available() else {} ) def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SeamlessM4TModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip( reason="Expected missing keys serve when using SeamlessM4TForXXX.from_pretrained from a checkpoint saved by SeamlessM4TModel.save_pretrained." 
) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="SeamlessM4TModel can take input_ids or input_features") def test_forward_signature(self): pass def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip( reason="SeamlessM4TModel is base class but has actually a bigger architecture than seamlessM4T task-specific models." ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4T has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class SeamlessM4TGenerationTest(unittest.TestCase): def setUp(self): self.speech_model_tester = SeamlessM4TModelTester(self, input_modality="speech") self.text_model_tester = SeamlessM4TModelTester(self, input_modality="text") self.tmpdirname = tempfile.mkdtemp() def update_generation(self, model): lang_code_to_id = { "fra": 4, "eng": 4, } generation_config = copy.deepcopy(model.generation_config) generation_config.__setattr__("text_decoder_lang_to_code_id", lang_code_to_id) generation_config.__setattr__("t2u_lang_code_to_id", lang_code_to_id) generation_config.__setattr__("vocoder_lang_code_to_id", lang_code_to_id) generation_config._from_model_config = False model.generation_config = generation_config def prepare_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_dict = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_dict = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_and_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_speech = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_text = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_speech, input_text def factory_generation_speech_test(self, model, inputs): set_seed(0) output = model.generate(**inputs) return output def test_speech_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() model = SeamlessM4TModel(config=config) self.update_generation(model) 
model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) state_dict = model.state_dict() text_model = SeamlessM4TForTextToSpeech.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() output_text = self.factory_generation_speech_test(model, input_text) speech_model = SeamlessM4TForSpeechToSpeech.from_pretrained(self.tmpdirname) self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(model, input_speech) self.assertListEqual(output_original_text[0].ravel().tolist(), output_text[0].ravel().tolist()) self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) self.assertTrue( output_original_speech[0].ravel().tolist() == output_speech[0].ravel().tolist(), "Speech generated was different", ) self.assertTrue( output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), "Speech generated was different", ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["generate_speech"] = False input_text["generate_speech"] = False model = SeamlessM4TModel(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) input_speech.pop("generate_speech") input_text.pop("generate_speech") state_dict = model.state_dict() text_model = SeamlessM4TForTextToText.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() for name, tensor in text_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist()) output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4TForSpeechToText.from_pretrained(self.tmpdirname) for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() output_speech = self.factory_generation_speech_test(speech_model, input_speech) self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech.ravel().tolist()) def test_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["num_beams"] = 3 input_speech["do_sample"] = True input_speech["num_return_sequences"] = 3 input_text["num_beams"] = 3 input_text["do_sample"] = True input_text["num_return_sequences"] = 3 for model_class in [SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TModel]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_speech) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * 
input_speech["input_features"].shape[0]) for model_class in [SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4TModel]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_text) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_text["input_ids"].shape[0]) @require_torch class SeamlessM4TModelIntegrationTest(unittest.TestCase): repo_id = "facebook/hf-seamless-m4t-medium" def assertListAlmostEqual(self, list1, list2, tol=1e-3): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) @cached_property def processor(self): return SeamlessM4TProcessor.from_pretrained(self.repo_id) @cached_property def input_text(self): input_ids = torch.tensor([[256057, 152, 248116, 354, 159, 7356, 248075, 3]]) input_ids = input_ids.to(torch_device) attention_mask = torch.ones_like(input_ids).to(torch_device) inputs = { "attention_mask": attention_mask, "input_ids": input_ids, } return inputs @cached_property def input_audio(self): set_seed(0) seq_len = 20000 sampling_rate = 16000 input_features = torch.rand((2, seq_len)) return self.processor(audios=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to( torch_device ) def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs): model1 = class1.from_pretrained(self.repo_id).to(torch_device) model2 = class2.from_pretrained(self.repo_id).to(torch_device) set_seed(0) output_1 = model1.generate(**inputs, **class1_kwargs) set_seed(0) output_2 = model2.generate(**inputs, **class2_kwargs) for key in output_1: if isinstance(output_1[key], torch.Tensor): if len(output_1[key].shape) == 0: self.assertEqual(output_1[key].item(), output_2[key].item()) else: self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist()) @slow def test_to_eng_text(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256047, 3291, 248116, 248066, 9, 7356, 248075, 3] expected_unit_tokens = [ 2,10051,8980,8212,949,1270,4311,1123,5918,2333,5311,3882,2415,5284,1123,612,8816,6370,5386,7334,4345,5645, 9437,5748,1378,9818,4319,7968,7375,2909,9119,5151,8728,5335,3896,4013,8939,8885,6048,9530,3167,5833,1072,693, 431,9867,364,7909,4608,5938,1889,9984,7947,4944,6171,3767,9861,9169,1187,8365,4571,7635,7784,7635,800,2393, 32,5380,5852,8289,2530,2762,1833,2056,3553,4641,3553,5683,370,2288,1344,1518,7534,703,8359,7699,2 ] expected_wav_slice = [-3e-05, -0.0004, -0.00037, -0.00013, -6e-05, 0.00012, -0.00016, 0.00025, 7e-05, -3e-05] set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_to_swh_text(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256168, 1665, 188589, 7040, 248075, 3] expected_unit_tokens = [ 2,10071,5729,9995,3089,7546,1204,1721,2532,4340,5623,3496,432,7730,9096,7677,3143,8211,6447,8399,4248,3565, 4529,7700,9308,217,6476,3485,9667,3194,8476,4923,5593,1148,4466,7416,4872,463,4872,253,2348,4640,3450,2133, 
6318,2806,817,7613,2698,6563,8712,8344,9286,6878,6387,4281,6387,640,6387,3200,640,8355,640,6708,979,1738,2 ] expected_wav_slice = [1e-05, -7e-05, -4e-05, -4e-05, -6e-05, -9e-05, -0.0001, -2e-05, -7e-05, -2e-05] set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_to_rus_speech(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256147, 1197, 73565, 3413, 537, 233331, 248075, 3] expected_unit_tokens = [ 2, 10067, 5729, 4798, 9631, 8378, 4446, 2393, 6901, 5983, 2817, 4629, 8532, 1991, 2931, 8576, 8857, 5936, 4317, 9000, 7740, 7995, 1225, 5980, 6094, 1420, 5373, 8771, 6600, 4487, 7029, 3630, 6740, 4870, 1483, 3003, 5585, 5511, 7465, 3222, 32, 6272, 1950, 3120, 5368, 639, 3713, 5935, 7943, 567, 6129, 6822, 1226, 5063, 9878, 7756, 8825, 1078, 5943, 457, 9282, 9668, 817, 7613, 2698, 6563, 8712, 8704, 9286, 8704, 6387, 4281, 6387, 640, 3200, 6387, 640, 8355, 6708, 979, 1738, 2 ] expected_wav_slice = [0.00013, 0.00012, 0.00014, 3e-05, 0.0, -6e-05, -0.00018, -0.00016, -0.00021, -0.00018] set_seed(0) output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_text_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToText, self.input_text, kwargs1, kwargs2) @slow def test_speech_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToText, self.input_audio, kwargs1, kwargs2) @slow def test_speech_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, self.input_audio, kwargs1, kwargs1) @slow def test_text_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToSpeech, self.input_text, kwargs1, kwargs1)
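# Hedged usage sketch (not part of the test suite): the integration tests above drive
# SeamlessM4TModel.generate through SeamlessM4TProcessor with `tgt_lang` and
# `return_intermediate_token_ids=True`. The snippet below mirrors that call pattern; the
# checkpoint id is the `repo_id` used above, while the French example sentence and the
# `src_lang` argument are illustrative assumptions, and the checkpoint must be downloadable.
if __name__ == "__main__":
    import torch
    from transformers import SeamlessM4TModel, SeamlessM4TProcessor

    repo_id = "facebook/hf-seamless-m4t-medium"
    processor = SeamlessM4TProcessor.from_pretrained(repo_id)
    model = SeamlessM4TModel.from_pretrained(repo_id).eval()

    inputs = processor(text="C'est un test", src_lang="fra", return_tensors="pt")
    with torch.no_grad():
        output = model.generate(**inputs, tgt_lang="eng", return_intermediate_token_ids=True)

    # As asserted in test_to_eng_text: `sequences` holds the target text tokens,
    # `unit_sequences` the speech units, and `waveform` the synthesized audio.
    print(output.sequences.shape, output.unit_sequences.shape, output.waveform.shape)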
Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Copied from tests/models/whisper/test_processor_whisper.py WhisperProcessorTest.test_feature_extractor, test_tokenizer and test_tokenizer_decode, with Whisper->SeamlessM4T.
import shutil import tempfile import unittest from transformers import SeamlessM4TFeatureExtractor, SeamlessM4TProcessor from transformers.models.seamless_m4t import ( SeamlessM4TTokenizer, SeamlessM4TTokenizerFast, ) from transformers.testing_utils import require_torch from .test_feature_extraction_seamless_m4t import floats_list @require_torch class SeamlessM4TProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "facebook/hf-seamless-m4t-medium" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return SeamlessM4TTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return SeamlessM4TFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SeamlessM4TProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) tokenizer_instance = isinstance(processor.tokenizer, SeamlessM4TTokenizerFast) or isinstance( processor.tokenizer, SeamlessM4TTokenizer ) self.assertTrue(tokenizer_instance) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SeamlessM4TProcessor( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SeamlessM4TProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SeamlessM4TFeatureExtractor) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) tokenizer_instance = isinstance(processor.tokenizer, SeamlessM4TTokenizerFast) or isinstance( processor.tokenizer, SeamlessM4TTokenizer ) self.assertTrue(tokenizer_instance) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(audios=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = 
self.get_tokenizer() processor = SeamlessM4TProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
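# Hedged sketch (not part of the test suite) of the routing the processor tests above verify:
# text inputs are delegated to the SeamlessM4T tokenizer and raw audio to the feature
# extractor. The checkpoint id matches `self.checkpoint` above; the dummy audio values are an
# illustrative assumption.
if __name__ == "__main__":
    import torch
    from transformers import SeamlessM4TProcessor

    processor = SeamlessM4TProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")

    text_inputs = processor(text="This is a test string", return_tensors="pt")
    audio_inputs = processor(audios=torch.rand(1, 16000).tolist(), sampling_rate=16000, return_tensors="pt")

    # Tokenizer output (input_ids / attention_mask) vs. feature-extractor output
    # (input_features / attention_mask).
    print(list(text_inputs.keys()), list(audio_inputs.keys()))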
Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch SeamlessM4Tv2 model.
import copy import tempfile import unittest from transformers import SeamlessM4Tv2Config, is_speech_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from transformers import ( SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model, ) from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import ( SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_speech_available(): from transformers import SeamlessM4TProcessor class SeamlessM4Tv2ModelTester: def __init__( self, parent, input_modality="speech", batch_size=2, seq_length=4, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_new_tokens=None, num_labels=3, num_choices=4, scope=None, vocab_size=20, t2u_vocab_size=20, hidden_size=6, num_hidden_layers=2, intermediate_size=6, max_position_embeddings=256, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=6, decoder_ffn_dim=6, t2u_encoder_layers=2, t2u_decoder_layers=2, t2u_encoder_ffn_dim=6, t2u_decoder_ffn_dim=6, num_heads=2, vocoder_num_spkrs=5, vocoder_num_langs=5, upsample_initial_channel=32, unit_embed_dim=25, spkr_embed_dim=6, lang_embed_dim=6, num_conv_pos_embeddings=8, unit_hifi_gan_vocab_size=20, t2u_num_langs=0, t2u_offset_tgt_lang=0, vocoder_offset=0, t2u_variance_predictor_hidden_dim=4, char_vocab_size=4, left_max_position_embeddings=2, right_max_position_embeddings=1, speech_encoder_chunk_size=2, speech_encoder_left_chunk_num=1, ): self.parent = parent self.input_modality = input_modality self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.vocab_size = vocab_size self.t2u_vocab_size = t2u_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.t2u_encoder_layers = t2u_encoder_layers self.t2u_decoder_layers = t2u_decoder_layers self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim self.num_heads = num_heads self.num_attention_heads = num_heads self.vocoder_num_spkrs = vocoder_num_spkrs self.vocoder_num_langs = vocoder_num_langs self.upsample_initial_channel = upsample_initial_channel self.unit_embed_dim = unit_embed_dim self.spkr_embed_dim = spkr_embed_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.lang_embed_dim = lang_embed_dim self.max_new_tokens = max_new_tokens self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size 
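# The remaining attributes cover the t2u/vocoder offsets together with the SeamlessM4Tv2-specific
# variance-predictor, character-vocabulary, left/right position-embedding and chunked
# speech-encoder settings.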
self.t2u_num_langs = t2u_num_langs self.t2u_offset_tgt_lang = t2u_offset_tgt_lang self.vocoder_offset = vocoder_offset self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim self.char_vocab_size = char_vocab_size self.left_max_position_embeddings = left_max_position_embeddings self.right_max_position_embeddings = right_max_position_embeddings self.speech_encoder_chunk_size = speech_encoder_chunk_size self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num def prepare_config_and_inputs(self): if self.input_modality == "text": inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) else: inputs = ids_tensor([self.batch_size, self.seq_length, 160], self.vocab_size - 1).float() input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) lm_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, inputs, decoder_input_ids, input_mask, lm_labels def get_config(self): return SeamlessM4Tv2Config( hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, vocab_size=self.vocab_size, t2u_vocab_size=self.t2u_vocab_size, hidden_size=self.hidden_size, speech_encoder_layers=self.num_heads, speech_encoder_intermediate_size=self.intermediate_size, max_position_embeddings=self.max_position_embeddings, encoder_layers=self.encoder_layers, decoder_layers=self.decoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, decoder_ffn_dim=self.decoder_ffn_dim, t2u_encoder_layers=self.t2u_encoder_layers, t2u_decoder_layers=self.t2u_decoder_layers, t2u_encoder_ffn_dim=self.t2u_encoder_ffn_dim, t2u_decoder_ffn_dim=self.t2u_decoder_ffn_dim, num_attention_heads=self.num_heads, encoder_attention_heads=self.num_heads, decoder_attention_heads=self.num_heads, t2u_encoder_attention_heads=self.num_heads, t2u_decoder_attention_heads=self.num_heads, speech_encoder_attention_heads=self.num_heads, unit_hifigan_vocab_vise=self.t2u_vocab_size, vocoder_num_spkrs=self.vocoder_num_spkrs, vocoder_num_langs=self.vocoder_num_langs, upsample_initial_channel=self.upsample_initial_channel, unit_embed_dim=self.unit_embed_dim, spkr_embed_dim=self.spkr_embed_dim, num_conv_pos_embeddings=self.num_conv_pos_embeddings, lang_embed_dim=self.lang_embed_dim, max_new_tokens=self.max_new_tokens, unit_hifi_gan_vocab_size=self.unit_hifi_gan_vocab_size, t2u_num_langs=self.t2u_num_langs, t2u_offset_tgt_lang=self.t2u_offset_tgt_lang, vocoder_offset=self.vocoder_offset, t2u_variance_predictor_embed_dim=self.hidden_size, t2u_variance_predictor_hidden_dim=self.t2u_variance_predictor_hidden_dim, char_vocab_size=self.char_vocab_size, left_max_position_embeddings=self.left_max_position_embeddings, right_max_position_embeddings=self.right_max_position_embeddings, speech_encoder_chunk_size=self.speech_encoder_chunk_size, speech_encoder_left_chunk_num=self.speech_encoder_left_chunk_num, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, decoder_input_ids, input_mask, lm_labels, 
encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model(self, config, input_ids, decoder_input_ids, input_mask, labels): model = SeamlessM4Tv2Model(config=config) model.to(torch_device) model.eval() if self.input_modality == "text": result = model(input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) else: result = model(input_features=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_features=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) decoder_output = result.logits decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state if self.input_modality == "text": seq_length = self.seq_length else: seq_length = model._compute_sub_sample_lengths_from_attention_mask(input_mask).max().item() self.parent.assertEqual(encoder_output.size(), (self.batch_size, seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, decoder_input_ids.shape[1], self.vocab_size)) self.parent.assertEqual(len(decoder_past), config.decoder_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True model = SeamlessM4Tv2Model(config=config) model.to(torch_device) model.eval() decoder_input_ids = torch.clamp(decoder_input_ids, config.pad_token_id + 1) outputs = model( input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=input_mask, use_cache=True ) past_key_values = outputs.past_key_values next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( input_ids, decoder_input_ids=next_input_ids, decoder_attention_mask=next_attention_mask, output_hidden_states=True, ) output_from_no_past = output_from_no_past["decoder_hidden_states"][0] output_from_past = model( input_ids, decoder_input_ids=next_tokens, decoder_attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["decoder_hidden_states"][0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = config_and_inputs input_name = "input_ids" if self.input_modality == "text" else "input_features" inputs_dict = { input_name: input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "labels": lm_labels, } return config, inputs_dict @require_torch class SeamlessM4Tv2ModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase): is_encoder_decoder = 
True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = False test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4Tv2ForSpeechToText,) if is_torch_available() else () input_name = "input_features" def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST: model = SeamlessM4Tv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:batch_size, :sequence_length] max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] attention_mask = torch.ones(input_ids.shape[:2], dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids.float(), attention_mask, max_length @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = ( torch.zeros(input_ids.shape[:2], dtype=torch.int64, layout=input_ids.layout, device=input_ids.device) + model._get_decoder_start_token_id() ) attention_mask = None return encoder_outputs, input_ids, attention_mask def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="SeamlessM4Tv2SpeechEncoder doesn't have an embedding layer") def test_inputs_embeds(self): pass @unittest.skip( reason="Expected missing keys 
serve when using SeamlessM4Tv2ForXXX.from_pretrained from a checkpoint saved by SeamlessM4Tv2Model.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip( reason="SeamlessM4Tv2Model is base class but has actually a bigger architecture than seamlessM4T task-specific models." ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4Tv2Model can takes input_ids or input_features") def test_forward_signature(self): pass @unittest.skip(reason="SeamlessM4Tv2 has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) 
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) sub_sampled_length = ( model._compute_sub_sample_lengths_from_attention_mask(inputs_dict["attention_mask"]).max().item() ) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, sub_sampled_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @require_torch class SeamlessM4Tv2ModelWithTextInputTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = True test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4Tv2ForTextToText,) if is_torch_available() else () def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST: model = SeamlessM4Tv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip( reason="Expected missing keys serve when using SeamlessM4Tv2ForXXX.from_pretrained from a checkpoint saved by SeamlessM4Tv2Model.save_pretrained." 
) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="SeamlessM4Tv2Model can take input_ids or input_features") def test_forward_signature(self): pass def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip( reason="SeamlessM4Tv2Model is base class but has actually a bigger architecture than seamlessM4T task-specific models." ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4Tv2 has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class SeamlessM4Tv2GenerationTest(unittest.TestCase): def setUp(self): self.speech_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.text_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.tmpdirname = tempfile.mkdtemp() def update_generation(self, model): lang_code_to_id = { "fra": 4, "eng": 4, } id_to_text = {str(i): "a" for i in range(model.config.vocab_size)} id_to_text["0"] = "ab" id_to_text["1"] = "_b" id_to_text["3"] = "," id_to_text["4"] = "_cd" char_to_id = {char: i for (i, char) in enumerate("abcd")} generation_config = copy.deepcopy(model.generation_config) generation_config.__setattr__("text_decoder_lang_to_code_id", lang_code_to_id) generation_config.__setattr__("t2u_lang_code_to_id", lang_code_to_id) generation_config.__setattr__("vocoder_lang_code_to_id", lang_code_to_id) generation_config.__setattr__("id_to_text", id_to_text) generation_config.__setattr__("char_to_id", char_to_id) generation_config.__setattr__("eos_token_id", 0) generation_config._from_model_config = False model.generation_config = generation_config def prepare_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_dict = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_dict = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_and_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_speech = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_text = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", 
"num_beams": 2, "do_sample": True, } return config, input_speech, input_text def factory_generation_speech_test(self, model, inputs): set_seed(0) output = model.generate(**inputs) return output def test_speech_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) state_dict = model.state_dict() text_model = SeamlessM4Tv2ForTextToSpeech.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() output_text = self.factory_generation_speech_test(model, input_text) speech_model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained(self.tmpdirname) self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(model, input_speech) self.assertListEqual(output_original_text[0].ravel().tolist(), output_text[0].ravel().tolist()) self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) self.assertTrue( output_original_speech[0].ravel().tolist() == output_speech[0].ravel().tolist(), "Speech generated was different", ) self.assertTrue( output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), "Speech generated was different", ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["generate_speech"] = False input_text["generate_speech"] = False model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) input_speech.pop("generate_speech") input_text.pop("generate_speech") state_dict = model.state_dict() text_model = SeamlessM4Tv2ForTextToText.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() for name, tensor in text_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist()) output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4Tv2ForSpeechToText.from_pretrained(self.tmpdirname) for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() output_speech = self.factory_generation_speech_test(speech_model, input_speech) self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech.ravel().tolist()) def test_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["num_beams"] = 3 input_speech["do_sample"] = True input_speech["temperature"] = 0.5 input_speech["num_return_sequences"] = 3 input_text["num_beams"] 
= 3 input_text["do_sample"] = True input_text["temperature"] = 0.5 input_text["num_return_sequences"] = 3 for model_class in [SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_speech) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_speech["input_features"].shape[0]) for model_class in [SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_text) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_text["input_ids"].shape[0]) @require_torch class SeamlessM4Tv2ModelIntegrationTest(unittest.TestCase): repo_id = "facebook/seamless-m4t-v2-large" def assertListAlmostEqual(self, list1, list2, tol=1e-4): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) @cached_property def processor(self): return SeamlessM4TProcessor.from_pretrained(self.repo_id) @cached_property def input_text(self): input_ids = torch.tensor([[256026, 109, 247729, 171, 128, 6816, 247676, 3]]) input_ids = input_ids.to(torch_device) attention_mask = torch.ones_like(input_ids).to(torch_device) inputs = { "attention_mask": attention_mask, "input_ids": input_ids, } return inputs @cached_property def input_audio(self): set_seed(0) seq_len = 20000 sampling_rate = 16000 input_features = torch.rand((2, seq_len)) return self.processor(audios=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to( torch_device ) def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs): model1 = class1.from_pretrained(self.repo_id).to(torch_device) model2 = class2.from_pretrained(self.repo_id).to(torch_device) set_seed(0) output_1 = model1.generate(**inputs, **class1_kwargs) set_seed(0) output_2 = model2.generate(**inputs, **class2_kwargs) for key in output_1: if isinstance(output_1[key], torch.Tensor): if len(output_1[key].shape) == 0: self.assertEqual(output_1[key].item(), output_2[key].item()) else: self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist()) @slow def test_to_eng_text(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256022, 3080, 1, 247669, 10, 6816, 247676, 3] expected_unit_tokens = [ 4746,7163,8208,8208,1315,1266,4307,1119,989,9594,3007,3007,4341,5205,7631,7631,3202,4061,9092,3191,7509,1715, 5280,5280,3554,8812,8197,6366,5382,5382,7330,2758,9433,9433,6863,7510,5800,5800,5286,1948,1825,1825,3956,8724, 8724,5331,8914,9315,9315,5288,2588,8167,8787,8787,8063,6008,2621,2621,2621,5696 ] expected_wav_slice = [9.485097e-04, 8.320558e-04, 7.178137e-04, 9.349979e-04, 1.121628e-03, 1.091766e-03, 1.279693e-03, 1.387754e-03, 1.296396e-03, 1.143557e-03] set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) self.assertListAlmostEqual( [-2.349690e-04, 9.920777e-02], 
[output.waveform.mean().item(), output.waveform.std().item()] ) @slow @unittest.skip(reason="Equivalence is broken since a new update") def test_to_swh_text(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256084, 109, 247729, 171, 10, 6816, 247676, 3] expected_unit_tokens = [ 5725,7163,7472,7472,6915,3099,3099,9921,2765,6515,6515,1374,1374,1347,8252,9854,9854,5662,2420,6600,2216,4503, 7208,6107,6107,7298,9123,6472,9663,9663,6366,6366,6445,575,3575,2052,2052,5788,5800,5800,5286,5286,1825,1825,3956, 3956,8724,8724,5331,8914,8914,9315,9315,2821,8167,8167,8787,8787,8787,8700,8700,8700,2175,2175,3196,3196,2621,1725, 1725,7507,5696 ] expected_wav_slice = [3.124037e-04, 2.450471e-04, 2.286572e-04, 2.317214e-04, 2.732605e-04, 2.478790e-04, 2.704144e-04, 2.665847e-04, 2.828784e-04, 2.684390e-04] set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) self.assertListAlmostEqual( [-2.001826e-04, 8.580012e-02], [output.waveform.mean().item(), output.waveform.std().item()] ) @slow def test_to_rus_speech(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) expected_text_tokens = [3, 256074, 107, 248213, 404, 247792, 247789, 3] expected_unit_tokens = [ 8976,7163,6915,2728,2728,5198,3318,3318,3686,1049,9643,1200,2052,2052,8196,8196,7624,7624,7555,7555,7555,7555, 9717,9717,4869,8167,8167,8167,8053,972,9362,8167,297,297,297,3993,3993,3993,3993,4660,4660,4660,4660,4660,4660, 7962,7962,225,225,8737,4199 ] expected_wav_slice = [1.415287e-03, 1.360976e-03, 1.297727e-03, 1.305321e-03, 1.352087e-03, 1.283812e-03, 1.352623e-03, 1.387384e-03, 1.449627e-03, 1.411701e-03] set_seed(0) output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) self.assertListAlmostEqual( [-2.818016e-04, 7.169888e-02], [output.waveform.mean().item(), output.waveform.std().item()], tol=5e-4 ) @slow def test_text_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToText, self.input_text, kwargs1, kwargs2) @slow def test_speech_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToText, self.input_audio, kwargs1, kwargs2) @slow def test_speech_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, self.input_audio, kwargs1, kwargs1) @slow def test_text_to_speech_model(self): 
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToSpeech, self.input_text, kwargs1, kwargs1)
import unittest from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class SegformerImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ): size = size if size is not None else {"height": 30, "width": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_reduce_labels = do_reduce_labels def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) def prepare_semantic_single_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image = Image.open(dataset[0]["file"]) map = Image.open(dataset[1]["file"]) return image, map def prepare_semantic_batch_inputs(): dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") image1 = Image.open(dataset[0]["file"]) map1 = Image.open(dataset[1]["file"]) image2 = Image.open(dataset[2]["file"]) map2 = Image.open(dataset[3]["file"]) return [image1, image2], [map1, map2] @require_torch @require_vision class SegformerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = SegformerImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = SegformerImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_reduce_labels")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 30, "width": 30}) self.assertEqual(image_processor.do_reduce_labels, False) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, reduce_labels=True) self.assertEqual(image_processor.size, {"height": 
42, "width": 42}) self.assertEqual(image_processor.do_reduce_labels, True) def test_call_segmentation_maps(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) encoding = image_processing(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) image, segmentation_map = prepare_semantic_single_inputs() encoding = image_processing(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = image_processing(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): image_processing = self.image_processing_class(**self.image_processor_dict) image, map = prepare_semantic_single_inputs() encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) image_processing.do_reduce_labels = True encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255)
import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class SegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class SegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[1, 1, 1, 1], sr_ratios=[8, 4, 2, 1], hidden_sizes=[8, 8, 16, 16], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 1, 2, 2], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = SegformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SegformerForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( 
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss, 0.0) def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels): config.num_labels = 1 model = SegformerForSemanticSegmentation(config=config) model.to(torch_device) model.eval() labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device) result = model(pixel_values, labels=labels) self.parent.assertGreater(result.loss, 0.0) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @unittest.skip("SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( 
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class SegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) 
expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_segmentation_city(self): image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)) @slow def test_post_processing_semantic_segmentation(self): image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape, expected_shape)
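# Illustrative sketch (not part of the test suite above): the end-to-end semantic
# segmentation flow that SegformerModelIntegrationTest exercises -- preprocess an
# image, run SegformerForSemanticSegmentation, and post-process the logits into a
# per-pixel label map. The checkpoint, fixture path and APIs are the ones used in
# the tests; the choice of target size is an assumption made for illustration.
def _example_segformer_semantic_segmentation_inference():
    import torch
    from PIL import Image

    from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

    image_processor = SegformerImageProcessor()
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    model.eval()

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Logits come out at 1/4 of the processed resolution, e.g. (1, num_labels, 128, 128)
    # for a (512, 512) input; post-processing upsamples them to the requested
    # (height, width) target size and takes the per-pixel argmax.
    segmentation = image_processor.post_process_semantic_segmentation(
        outputs=outputs, target_sizes=[image.size[::-1]]
    )[0]
    return segmentation  # LongTensor of shape (height, width) with predicted class ids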
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import SegformerConfig from transformers.file_utils import is_tf_available, is_vision_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFSegformerForImageClassification, TFSegformerForSemanticSegmentation, TFSegformerModel from transformers.models.segformer.modeling_tf_segformer import TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class TFSegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class TFSegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[1, 1, 1, 1], sr_ratios=[8, 4, 2, 1], hidden_sizes=[8, 8, 16, 16], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 1, 2, 2], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = TFSegformerModel(config=config) result = model(pixel_values, training=False) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFSegformerForSemanticSegmentation(config) result = 
model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_keras_fit(self, for_segmentation: bool = False): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, seg_labels = config_and_inputs if for_segmentation: inputs_dict = {"pixel_values": pixel_values, "labels": seg_labels} else: inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))} return config, inputs_dict @require_tf class TFSegformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFSegformerModel, TFSegformerForImageClassification, TFSegformerForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFSegformerModel, "image-classification": TFSegformerForImageClassification} if is_tf_available() else {} ) test_head_masking = False test_onnx = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = TFSegformerModelTester(self) self.config_tester = TFSegformerConfigTester(self, config_class=SegformerConfig, has_text_modality=False) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) expected_seq_len = (self.model_tester.image_size // 32) ** 2 
expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. 
Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ != "TFSegformerModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): super().test_keras_fit() def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def apply(model): for_segmentation = True if model_class.__name__ == "TFSegformerForSemanticSegmentation" else False _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit( for_segmentation=for_segmentation ) added_label = prepared_for_class[sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]] loss_size = tf.size(added_label) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] if model_class.__name__ == "TFSegformerForSemanticSegmentation": self.assertEqual(loss.shape, (1,)) else: self.assertEqual(loss.shape, [loss_size]) _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit( for_segmentation=for_segmentation ) loss = model(**prepared_for_class)[0] if model_class.__name__ == "TFSegformerForSemanticSegmentation": self.assertEqual(loss.shape, (1,)) else: self.assertEqual(loss.shape, [loss_size]) label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) loss = model(tuple_input[:-1])[0] if model_class.__name__ == 
"TFSegformerForSemanticSegmentation": self.assertEqual(loss.shape, (1,)) else: self.assertEqual(loss.shape, [loss_size]) for model_class in self.all_model_classes: if model_class.__name__ != "TFSegformerModel": model = model_class(config) apply(model) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) @slow def test_model_from_pretrained(self): for model_name in TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFSegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFSegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = TFSegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="tf") pixel_values = encoded_inputs.pixel_values outputs = model(pixel_values, training=False) expected_shape = tf.TensorShape((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) tf.debugging.assert_near(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4) @slow def test_inference_image_segmentation_city(self): image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = TFSegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="tf") pixel_values = encoded_inputs.pixel_values outputs = model(pixel_values, training=False) expected_shape = tf.TensorShape((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) tf.debugging.assert_near(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)
import math import unittest import pytest from transformers import SEWConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SEWForCTC, SEWForSequenceClassification, SEWModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices class SEWModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=32, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(64, 32, 32), conv_stride=(5, 2, 1), conv_kernel=(10, 3, 1), conv_bias=False, num_conv_pos_embeddings=31, num_conv_pos_embedding_groups=2, squeeze_factor=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.squeeze_factor = squeeze_factor self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length // self.squeeze_factor def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return SEWConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, squeeze_factor=self.squeeze_factor, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout=self.hidden_dropout, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, 
input_values, attention_mask): model = SEWModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): model = SEWModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = SEWForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_loss(self, config, input_values, *args): model = SEWForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, 
float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = SEWForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class SEWModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SEWForCTC, SEWModel, SEWForSequenceClassification) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": SEWForSequenceClassification, "automatic-speech-recognition": SEWForCTC, "feature-extraction": SEWModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = SEWModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = 
torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = SEWModel.from_pretrained("asapp/sew-tiny-100k") self.assertIsNotNone(model) @require_torch class SEWUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_soundfile @slow class SEWModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_pretrained_batched(self): model = 
SEWModel.from_pretrained("asapp/sew-tiny-100k").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-tiny-100k") input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state expected_outputs_first = torch.tensor( [ [ [0.1509, 0.5372, 0.3061, -0.1694], [-0.1700, 0.5764, 0.2753, -0.1299], [0.1281, 0.7949, 0.2342, -0.1624], [-0.1627, 0.6710, 0.2215, -0.1317], ], [ [0.0408, 1.4355, 0.8605, -0.0968], [0.0393, 1.2368, 0.6826, 0.0364], [-0.1269, 1.9215, 1.1677, -0.1297], [-0.1654, 1.6524, 0.6877, -0.0196], ], ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [1.3379, -0.1450, -0.1500, -0.0515], [0.8364, -0.1680, -0.1248, -0.0689], [1.2791, -0.1507, -0.1523, -0.0564], [0.8208, -0.1690, -0.1199, -0.0751], ], [ [0.6959, -0.0861, -0.1235, -0.0861], [0.4700, -0.1686, -0.1141, -0.1199], [1.0776, -0.1137, -0.0124, -0.0472], [0.5774, -0.1675, -0.0376, -0.0823], ], ], device=torch_device, ) expected_output_sum = 62146.7422 self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3)) self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3)) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 5) def test_inference_ctc_batched(self): model = SEWForCTC.from_pretrained("asapp/sew-tiny-100k-ft-ls100h").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("asapp/sew-tiny-100k-ft-ls100h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "swet covered brian's body trickling into the tightloine closs hat was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
import math import unittest import pytest from transformers import SEWDConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SEWDForCTC, SEWDForSequenceClassification, SEWDModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices class SEWDModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=32, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(64, 32, 32), conv_stride=(5, 2, 1), conv_kernel=(10, 3, 1), conv_bias=False, num_conv_pos_embeddings=31, num_conv_pos_embedding_groups=2, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, position_biased_input=False, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", num_hidden_layers=2, num_attention_heads=2, hidden_dropout=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.squeeze_factor = squeeze_factor self.max_position_embeddings = max_position_embeddings self.position_buckets = position_buckets self.share_att_key = share_att_key self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.norm_rel_ebd = norm_rel_ebd self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length // self.squeeze_factor def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return SEWDConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, 
num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, squeeze_factor=self.squeeze_factor, max_position_embeddings=self.max_position_embeddings, position_buckets=self.position_buckets, share_att_key=self.share_att_key, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, norm_rel_ebd=self.norm_rel_ebd, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout=self.hidden_dropout, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, input_values, attention_mask): model = SEWDModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): model = SEWDModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = SEWDForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWDForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss 
self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_loss(self, config, input_values, *args): model = SEWDForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWDForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = SEWDForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class SEWDModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SEWDForCTC, SEWDModel, SEWDForSequenceClassification) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": SEWDForSequenceClassification, "automatic-speech-recognition": SEWDForCTC, "feature-extraction": SEWDModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = SEWDModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def 
test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k") self.assertIsNotNone(model) @require_torch class SEWDUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_soundfile @slow class 
SEWDModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_pretrained_batched(self): model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-d-tiny-100k") input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state expected_outputs_first = torch.tensor( [ [ [-0.1619, 0.6995, 0.4062, -0.1014], [-0.1364, 0.5960, 0.0952, -0.0873], [-0.1572, 0.5718, 0.4228, -0.0864], [-0.1325, 0.6823, 0.1387, -0.0871], ], [ [-0.1296, 0.4008, 0.4952, -0.1450], [-0.1152, 0.3693, 0.3037, -0.1290], [-0.1194, 0.6074, 0.3531, -0.1466], [-0.1113, 0.3135, 0.2224, -0.1338], ], ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [-0.1577, 0.5108, 0.8553, 0.2550], [-0.1530, 0.3580, 0.6143, 0.2672], [-0.1535, 0.4954, 0.8503, 0.1387], [-0.1572, 0.3363, 0.6217, 0.1490], ], [ [-0.1338, 0.5459, 0.9607, -0.1133], [-0.1502, 0.3738, 0.7313, -0.0986], [-0.0953, 0.4708, 1.0821, -0.0944], [-0.1474, 0.3598, 0.7248, -0.0748], ], ], device=torch_device, ) expected_output_sum = 54201.0469 self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=1e-3)) self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=1e-3)) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 1) def test_inference_ctc_batched(self): model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "swet covered breon's body trickling into the titlowing closs that was the only garmened he war", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
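# Illustrative sketch (not part of the original test suite): the masking utility exercised in the
# SEWUtilsTest/SEWDUtilsTest classes above can also be inspected directly. The numbers simply
# mirror test_compute_mask_indices_overlap (batch of 4, 80 frames, mask_prob=0.5, mask_length=4);
# the helper name `_example_inspect_mask_indices` is ours and not part of the library.
def _example_inspect_mask_indices():
    """Return the number of masked frames per batch row for a toy masking configuration."""
    from transformers.models.hubert.modeling_hubert import _compute_mask_indices

    # Boolean numpy array of shape (batch_size, sequence_length); spans of `mask_length` frames
    # are drawn until roughly `mask_prob` of each row is covered. Overlapping spans may merge,
    # which is why the overlap test above only asserts an upper bound per row.
    mask = _compute_mask_indices((4, 80), mask_prob=0.5, mask_length=4)
    return mask.sum(axis=-1).tolist()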
import tempfile import unittest import numpy as np from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow, torch_device from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bart.test_modeling_flax_bart import FlaxBartStandaloneDecoderModelTester from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester from ..wav2vec2.test_modeling_flax_wav2vec2 import FlaxWav2Vec2ModelTester if is_flax_available(): import jax import jax.numpy as jnp from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import ( FlaxBartForCausalLM, FlaxBertForCausalLM, FlaxGPT2LMHeadModel, FlaxSpeechEncoderDecoderModel, FlaxWav2Vec2Model, SpeechEncoderDecoderConfig, ) from transformers.modeling_flax_outputs import FlaxBaseModelOutput from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import SpeechEncoderDecoderModel @require_flax class FlaxEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = FlaxBaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( attention_mask, decoder_input_ids, decoder_attention_mask, encoder_outputs=encoder_outputs ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, 
decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 4e-2) def check_encoder_decoder_model_from_encoder_decoder_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) self.assertEqual(config.add_adapter, encoder_model.config.add_adapter) self.assertEqual(decoder_config.use_cache, decoder_model.config.use_cache) with tempfile.TemporaryDirectory() as enc_tmpdir: with tempfile.TemporaryDirectory() as dec_tmpdir: encoder_model.save_pretrained(enc_tmpdir) decoder_model.save_pretrained(dec_tmpdir) enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=enc_tmpdir, decoder_pretrained_model_name_or_path=dec_tmpdir, encoder_add_adapter=not config.add_adapter, decoder_use_cache=not decoder_config.use_cache, ) self.assertNotEqual(config.add_adapter, enc_dec_model.config.encoder.add_adapter) self.assertNotEqual(decoder_config.use_cache, enc_dec_model.config.decoder.use_cache) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_output_attentions( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = 
{"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate(self, inputs, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) pad_token_id = enc_dec_model.config.decoder.pad_token_id eos_token_id = enc_dec_model.config.decoder.eos_token_id decoder_start_token_id = enc_dec_model.config.decoder.decoder_start_token_id if pad_token_id is None and eos_token_id is not None: pad_token_id = eos_token_id if decoder_start_token_id is None: decoder_start_token_id = enc_dec_model.config.decoder.bos_token_id if decoder_start_token_id is None: decoder_start_token_id = pad_token_id generated_output = enc_dec_model.generate( inputs, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, ) generated_sequences = generated_output.sequences self.assertEqual(generated_sequences.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def check_freeze_feature_encoder( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) params = enc_dec_model.params def cross_entropy(logits, labels): return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1) def compute_loss( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder: bool = False, ): outputs_enc_dec = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, freeze_feature_encoder=freeze_feature_encoder, params=params, ) logits = outputs_enc_dec.logits vocab_size = logits.shape[-1] loss = cross_entropy(logits, onehot(labels=decoder_input_ids, num_classes=vocab_size)).sum() return (loss, logits) grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, logits), grads = grad_fn( params, inputs, attention_mask, 
decoder_input_ids, freeze_feature_encoder=False ) (loss_frozen, logits_frozen), grads_frozen = grad_fn( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder=True ) self.assertTrue((logits == logits_frozen).all()) self.assertEqual(loss, loss_frozen) grads = flatten_dict(grads) grads_frozen = flatten_dict(grads_frozen) self.assertEqual(grads.keys(), grads_frozen.keys()) feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k) feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k) for feature_extractor_grad, feature_extractor_grad_frozen in zip( feature_extractor_grads, feature_extractor_grads_frozen ): self.assertTrue((feature_extractor_grad_frozen == 0.0).all()) self.assertTrue((feature_extractor_grad > 0.0).any()) grads = tuple(grads[k] for k in grads if "feature_extractor" not in k) grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k) for grad, grad_frozen in zip(grads, grads_frozen): self.assertTrue((grad == grad_frozen).all()) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 1e-5) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 1e-5) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 1e-5) def check_equivalence_pt_to_flax(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) 
self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_encoder_decoder_model_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_encoder_decoder_pretrained(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_freeze_feature_encoder(self): input_ids_dict = self.prepare_config_and_inputs() self.check_freeze_feature_encoder(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") inputs_dict = config_inputs_dict del inputs_dict["encoder_hidden_states"] batch_size = inputs_dict["decoder_attention_mask"].shape[0] inputs_dict["decoder_attention_mask"] = np.concatenate( [np.ones(shape=(batch_size, 1)), inputs_dict["decoder_attention_mask"][:, 1:]], axis=1 ) decoder_config.use_cache = False self.assertTrue(decoder_config.cross_attention_hidden_size is None) decoder_config.hidden_size = config.hidden_size self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) config.add_adapter = True self.assertTrue(config.add_adapter) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() inputs = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) attention_mask = ids_tensor([13, 5], vocab_size=2) outputs = model_2( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = 
FlaxSpeechEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 4e-2) @require_flax class FlaxWav2Vec2GPT2ModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "gpt2-medium" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxGPT2LMHeadModel(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxGPT2ModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2gpt2_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("jsnfly/wav2vec2-large-xlsr-53-german-gpt2") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "jsnfly/wav2vec2-large-xlsr-53-german-gpt2", from_pt=True ) pt_model.to(torch_device) pt_model.eval() batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = 
fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BartModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "bart-large" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBartForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBartStandaloneDecoderModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bart_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "patrickvonplaten/wav2vec2-2-bart-large", from_pt=True ) pt_model.to(torch_device) pt_model.eval() batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() 
fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BertModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "bert-large-uncased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], model.config.encoder.vocab_size) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBertForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBertModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bert_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large", from_pt=True) pt_model.to(torch_device) pt_model.eval() batch_size = 13 input_values = floats_tensor([batch_size, 512], fx_model.config.encoder.vocab_size) attention_mask = random_attention_mask([batch_size, 
512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2)
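# Illustrative usage sketch, not part of the original test module: a minimal standalone
# example of the encoder/decoder composition that FlaxEncoderDecoderMixin exercises above.
# The checkpoint pair is the one already referenced in FlaxWav2Vec2GPT2ModelTest; the audio
# length and the single BOS start token are assumptions made only for this example.
import numpy as np

from transformers import FlaxSpeechEncoderDecoderModel

model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-large-lv60", "gpt2-medium"
)

raw_audio = np.random.randn(1, 16_000).astype(np.float32)  # 1 second of fake 16 kHz audio
decoder_input_ids = np.array([[model.config.decoder.bos_token_id]])

outputs = model(inputs=raw_audio, decoder_input_ids=decoder_input_ids)
print(outputs.logits.shape)  # (1, 1, decoder vocab size)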
codingutf8 2021 huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license make the decoder inputs a different shape from the encoder inputs to harden the test make sure eos token is set to none to prevent early stopping of generation bert does not have a bos token id so use padtokenid instead make sure that cross attention layers are added make sure that cross attention layers are added can t save full model for now because speech2textmodel speech2textencoder can t save full model for now because speech2textmodel speech2textencoder all published pretrained models are speech2textmodel speech2textencoder make sure that cross attention layers are added disable cache for now there are no published pretrained speech2text2forcausallm for now coding utf 8 2021 huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license make the decoder inputs a different shape from the encoder inputs to harden the test make sure eos token is set to none to prevent early stopping of generation bert does not have a bos token id so use pad_token_id instead make sure that cross attention layers are added make sure that cross attention layers are added can t save full model for now because speech2textmodel speech2textencoder can t save full model for now because speech2textmodel speech2textencoder all published pretrained models are speech2textmodel speech2textencoder make sure that cross attention layers are added disable cache for now there are no published pretrained speech2text2forcausallm for now
import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..speech_to_text.test_modeling_speech_to_text import Speech2TextModelTester from ..speech_to_text_2.test_modeling_speech_to_text_2 import Speech2Text2StandaloneDecoderModelTester from ..wav2vec2.test_modeling_wav2vec2 import Wav2Vec2ModelTester if is_torch_available(): import numpy as np import torch from transformers import ( BertLMHeadModel, Speech2Text2ForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Model, ) from transformers.modeling_outputs import BaseModelOutput from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextEncoder @require_torch class EncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_encoder_decoder_model_from_pretrained_configs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = SpeechEncoderDecoderModel(encoder_decoder_config) enc_dec_model.to(torch_device) enc_dec_model.eval() self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_with_inputs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): inputs = input_values 
if input_features is None else input_features encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) outputs_encoder_decoder_kwarg = enc_dec_model( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder_kwarg["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = SpeechEncoderDecoderModel.from_pretrained(tmpdirname) enc_dec_model.to(torch_device) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_save_and_load_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, 
attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname: enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname) enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname) SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=encoder_tmp_dirname, decoder_pretrained_model_name_or_path=decoder_tmp_dirname, ) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, input_values=None, input_features=None, **kwargs, ): decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) inputs = input_values if input_features is None else input_features encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model.encoder._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate( self, config, decoder_config, input_values=None, input_features=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None inputs = input_values if input_features is None else input_features generated_output = enc_dec_model.generate( inputs, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) 
self.assertEqual(generated_output.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_with_inputs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_with_inputs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def test_training_gradient_checkpointing(self): inputs_dict = self.prepare_config_and_inputs() encoder_model, decoder_model = self.get_encoder_decoder_model( inputs_dict["config"], inputs_dict["decoder_config"] ) model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 model.config.pad_token_id = 0 model_inputs = { "attention_mask": inputs_dict["attention_mask"], "labels": inputs_dict["labels"], "decoder_input_ids": inputs_dict["decoder_input_ids"], } inputs = inputs_dict["input_features"] if "input_features" in inputs_dict else inputs_dict["input_values"] loss = model(inputs, **model_inputs).loss loss.backward() @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = SpeechEncoderDecoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class Wav2Vec2BertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-base-960h", "bert-base-cased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_values": input_values, 
"attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) wav2vec2_model_tester = Wav2Vec2ModelTester(self) encoder_config_and_inputs = wav2vec2_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_values, input_mask, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/s2t-small-librispeech-asr", "bert-base-cased" ) batch_size = 13 input_features = floats_tensor([batch_size, 7, 80], scale=1.0) attention_mask = random_attention_mask([batch_size, 7]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_features": input_features, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Speech2TextEncoder(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) speech2text_model_tester = Speech2TextModelTester(self) encoder_config_and_inputs = speech2text_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() config, inputs = encoder_config_and_inputs input_features = inputs["input_features"] input_mask = inputs["attention_mask"] ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "input_features": input_features, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } def test_encoder_decoder_model_from_pretrained_configs(self): pass def test_save_and_load_from_pretrained(self): pass 
def test_real_model_save_load_from_pretrained(self): pass @require_torch class Wav2Vec2Speech2Text2(EncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = Speech2Text2ForCausalLM(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = Wav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = Speech2Text2StandaloneDecoderModelTester( self, batch_size=13, d_model=32, max_position_embeddings=512 ) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() ( config, input_values, input_mask, ) = encoder_config_and_inputs (decoder_config, decoder_input_ids, decoder_attention_mask, _) = decoder_config_and_inputs decoder_config.add_cross_attention = True decoder_config.use_cache = False return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": decoder_input_ids, } def test_real_model_save_load_from_pretrained(self): pass
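# Illustrative usage sketch, not part of the original test module: the same wav2vec2 + BERT
# composition that Wav2Vec2BertModelTest builds above, driven through a plain generate()
# call instead of the unittest mixin. The decoder start token mirrors the trick used in
# check_encoder_decoder_model_generate; the audio length and max_length are assumptions made
# only for this example.
import torch

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-cased"
)
model.eval()

input_values = torch.randn(1, 16_000)  # 1 second of fake 16 kHz audio
with torch.no_grad():
    generated_ids = model.generate(
        input_values,
        decoder_start_token_id=model.config.decoder.pad_token_id,
        max_length=10,
    )
print(generated_ids.shape)  # (1, <= 10)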
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools import os import random import tempfile import unittest import numpy as np from transformers import Speech2TextFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class Speech2TextFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Speech2TextFeatureExtractor def setUp(self): self.feat_extract_tester = Speech2TextFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for 
enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_trunc_max_length(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def test_cepstral_mean_and_variance_normalization_trunc_longest(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", 
return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) self.assertEqual(input_features.shape, (3, 4, 24)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) self.assertEqual(input_features.shape, (3, 6, 24)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): expected = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ]) input_speech = self._load_datasamples(1) feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEquals(input_features.shape, (1, 584, 24)) self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4)) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() 
self.assertEqual(dict_first, dict_second) @require_torch @unittest.mock.patch( "transformers.models.speech_to_text.feature_extraction_speech_to_text.is_speech_available", lambda: False ) class Speech2TextFeatureExtractionWithoutTorchaudioTest(Speech2TextFeatureExtractionTest): def test_using_audio_utils(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) self.assertTrue(hasattr(feat_extract, "window")) self.assertTrue(hasattr(feat_extract, "mel_filters")) from transformers.models.speech_to_text.feature_extraction_speech_to_text import is_speech_available self.assertFalse(is_speech_available())
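# Hedged usage sketch (not part of the test file above): how the padding, attention
# mask and utterance-level cepstral mean/variance normalization exercised by these
# tests look from user code. The constructor arguments mirror the tester's defaults;
# the random waveforms are placeholders for real 16 kHz audio.
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor(
    feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000,
    return_attention_mask=True, do_normalize=True,
)
waveforms = [np.random.randn(n).astype(np.float32) for n in (800, 1000, 1200)]
inputs = extractor(
    waveforms, sampling_rate=16_000, padding="longest",
    return_attention_mask=True, return_tensors="np",
)
valid = int(inputs.attention_mask[0].sum())
# after normalization, the valid (unpadded) frames are roughly zero-mean
print(inputs.input_features.shape, abs(inputs.input_features[0][:valid].mean()) < 1e-3)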
coding: utf-8. Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Testing suite for the PyTorch Speech2Text model. The suite covers, among other things: the input_features naming (instead of input_ids); computing the output length of the convolutional layers; a first decoder forward pass with cache, after which hypothetical next tokens are created, appended to next_input_ids, and a random slice is selected to check that the cached and uncached outputs are equal for that slice; inputs_embeds (not implemented currently) and training (not supported yet); the forward signature, where signature.parameters is an OrderedDict so the arg_names order is deterministic; that output_hidden_states and output_attentions also work when set on the config; the expected number of outputs (the loss is added at the first position if labels are passed, and past_key_values are counted when returned), decoder attentions, cross attentions, and that attention outputs are always last and correctly ordered; resizing the token embeddings with a larger or smaller vocab size, checking that the model's vocab size changes accordingly, that the embeddings matrix is actually resized, that adding and removing tokens does not modify the first part of the embedding matrix, that decoder_input_ids are clamped where needed, and that a forward pass still succeeds, for both tied and untied output embeddings (checking the bias if present, and skipping models that cannot untie embeddings or have no output embeddings); the shapes of the scores, encoder and decoder attentions, and encoder and decoder hidden states returned by generate; TorchScript tracing with the config zero-initialized to be sure there are no NaNs and use_cache disabled (FSTM still requires this hack; FSTM should probably be refactored similarly to BART afterward); PT/TF equivalence with missing keys allowed, since TF doesn't cache the sinusoidal embeddings in an attribute; and integration tests with automatic decoding of LibriSpeech.
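# Hedged sketch (illustration only): the subsampling arithmetic mirrored by
# get_subsampled_output_lengths in the tester below. Each of the model's
# convolutional subsampling layers has stride 2, mapping a feature sequence of
# length L to (L - 1) // 2 + 1. The function name here is illustrative.
def example_subsampled_length(seq_length: int, num_conv_layers: int = 2) -> int:
    for _ in range(num_conv_layers):
        seq_length = (seq_length - 1) // 2 + 1
    return seq_length

assert example_subsampled_length(7) == 2    # the tester's default seq_length of 7 frames
assert example_subsampled_length(20) == 5   # another illustrative value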
import copy import inspect import os import tempfile import unittest from transformers import Speech2TextConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, require_torchaudio, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextProcessor from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder def prepare_speech_to_text_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_features.ne(0) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class Speech2TextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=32, input_feat_per_channel=24, input_channels=1, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=20, max_target_positions=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = conv_kernel_sizes self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size ) attention_mask = 
torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2) config = self.get_config() inputs_dict = prepare_speech_to_text_inputs_dict( config, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) return config, inputs_dict def get_config(self): return Speech2TextConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, num_conv_layers=self.num_conv_layers, conv_kernel_sizes=self.conv_kernel_sizes, conv_channels=self.conv_channels, input_feat_per_channel=self.input_feat_per_channel, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() 
as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder( inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"] )[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device) encoder_attention_mask = encoder._get_feature_vector_attention_mask( encoder_last_hidden_state.shape[1], inputs_dict["attention_mask"] ) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=encoder_attention_mask, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Speech2TextModel, Speech2TextForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Speech2TextForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": Speech2TextForConditionalGeneration, "feature-extraction": Speech2TextModel} if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = Speech2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_inputs_embeds(self): pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = 
input_dict["input_features"] attention_mask = input_dict["attention_mask"] model = Speech2TextForConditionalGeneration(config).eval().to(torch_device) input_features = input_features.half() model.half() model.generate(input_features, attention_mask=attention_mask) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) 
subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, 
model_class)) models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) if model.get_output_embeddings() is None: continue model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape[:2] subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in 
self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False input_features = inputs["input_features"] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] traced_model = torch.jit.trace( model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @unittest.skip("Test failing, @RocketNight is looking into it") def test_tf_from_pt_safetensors(self): pass @require_torch @require_torchaudio @require_sentencepiece @require_tokenizers @slow class Speech2TextModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) generated_ids = model.generate(input_features) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") 
model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_features = inputs.input_features.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) generated_ids = model.generate(input_features, attention_mask=attention_mask) generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister cultar's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and roast beef looming before us" " similes drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick leyton's work is really greek after all and can discover in it" " but little of rocky ithaca", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
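# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original test
# suite): a minimal PyTorch ASR inference flow mirroring the integration
# tests above. It assumes network access to download the
# "facebook/s2t-small-librispeech-asr" checkpoint and the optional
# `datasets` dependency; the helper name below is hypothetical.
def _example_speech_to_text_pt_inference():
    from datasets import load_dataset

    from transformers import Speech2TextForConditionalGeneration, Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")

    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    # The processor turns the raw waveform into log-mel "input_features",
    # as in _load_datasamples above.
    inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

    # Greedy generation followed by detokenization, as in test_generation_librispeech.
    generated_ids = model.generate(inputs.input_features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)
# ---------------------------------------------------------------------------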
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow Speech2Text model. """
from __future__ import annotations import inspect import unittest from transformers import Speech2TextConfig from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration, TFSpeech2TextModel def prepare_speech_to_text_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.math.not_equal(input_features, 0) if decoder_attention_mask is None: decoder_attention_mask = tf.math.not_equal(decoder_input_ids, config.pad_token_id) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFSpeech2TextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=32, input_feat_per_channel=24, input_channels=1, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=20, max_target_positions=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, scale_embedding=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = conv_kernel_sizes self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.scale_embedding = scale_embedding def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size ) attention_mask = tf.ones([self.batch_size, self.seq_length], dtype=tf.int64) decoder_input_ids = tf.math.maximum(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 2) config = self.get_config() inputs_dict = 
prepare_speech_to_text_inputs_dict( config, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) return config, inputs_dict def get_config(self): return Speech2TextConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, num_conv_layers=self.num_conv_layers, conv_kernel_sizes=self.conv_kernel_sizes, conv_channels=self.conv_channels, input_feat_per_channel=self.input_feat_per_channel, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, scale_embedding=self.scale_embedding, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for _ in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFSpeech2TextModel(config=config).get_decoder() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) _, past_key_values = outputs.to_tuple() next_tokens = tf.math.maximum(ids_tensor((self.batch_size, 3), config.vocab_size), 2) next_attn_mask = ids_tensor((self.batch_size, 3), 2, dtype=tf.int64) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, atol=1e-2) @require_tf class TFSpeech2TextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFSpeech2TextModel, TFSpeech2TextForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFSpeech2TextForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFSpeech2TextModel} if is_tf_available() else {} is_encoder_decoder = True test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_ids" def setUp(self): self.model_tester = TFSpeech2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_generate_fp16(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_token_embeddings(self): pass def test_resize_tokens_embeddings(self): pass def test_resize_embeddings_untied(self): pass def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = tf.repeat(encoder_outputs.last_hidden_state, num_interleave, axis=0) input_ids = input_ids[:, :, 0] input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape[:2] subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, 
subsampled_seq_length ) self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): model.generate(input_features, do_sample=False, num_return_sequences=2) self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with self.assertRaises(ValueError): model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @require_tf @require_sentencepiece @require_tokenizers @slow class TFSpeech2TextModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def 
test_generation_librispeech(self): model = TFSpeech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = self.default_processor input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = TFSpeech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="tf", padding=True) generated_ids = model.generate(inputs.input_features, attention_mask=inputs.attention_mask) generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister cultar's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and roast beef looming before us" " similes drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick leyton's work is really greek after all and can discover in it" " but little of rocky ithaca", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
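# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original test
# suite): the TensorFlow counterpart of the integration tests above. Assumes
# network access for the "facebook/s2t-small-librispeech-asr" checkpoint and
# the optional `datasets` dependency; the helper name is hypothetical.
def _example_speech_to_text_tf_inference():
    from datasets import load_dataset

    from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    model = TFSpeech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")

    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="tf")

    # Mirrors test_generation_librispeech: generate token ids, then detokenize.
    generated_ids = model.generate(inputs.input_features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)
# ---------------------------------------------------------------------------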
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import Speech2TextFeatureExtractor, Speech2TextProcessor, Speech2TextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, require_torchaudio from transformers.utils import FEATURE_EXTRACTOR_NAME from .test_feature_extraction_speech_to_text import floats_list SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model") @require_torch @require_torchaudio @require_sentencepiece class Speech2TextProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["<s>", "<pad>", "</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) save_dir = Path(self.tmpdirname) save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"]) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"]) tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) feature_extractor_map = { "feature_size": 24, "num_mel_bins": 24, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } save_json(feature_extractor_map, save_dir / FEATURE_EXTRACTOR_NAME) def get_tokenizer(self, **kwargs): return Speech2TextTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return Speech2TextFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = Speech2TextProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = Speech2TextProcessor( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = Speech2TextProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, 
return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import Speech2TextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp FR_CODE = 5 ES_CODE = 10 @require_sentencepiece @require_tokenizers class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = Speech2TextTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() spm_model = sp.SentencePieceProcessor() spm_model.Load(SAMPLE_VOCAB) vocab = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))] vocab_tokens = dict(zip(vocab, range(len(vocab)))) save_dir = Path(self.tmpdirname) save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"]) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"]) tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_001) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_001) def test_full_tokenizer(self): tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual(tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual(back_tokens,[SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."]) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 
85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, 
model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", ) @require_sentencepiece class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase): checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium" french_text = "C'est trop cool" spanish_text = "Esto es genial" @classmethod def setUpClass(cls): cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name) return cls def check_language_codes(self): self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4) self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6) self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9) self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11) def test_vocab_size(self): self.assertEqual(self.tokenizer.vocab_size, 10_000) def test_tokenizer_decode_ignores_language_codes(self): self.assertIn(ES_CODE, self.tokenizer.all_special_ids) generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_spanish) self.assertNotIn(self.tokenizer.eos_token, result) def test_tokenizer_adds_special_tokens(self): self.tokenizer.tgt_lang = "fr" encoded = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0], FR_CODE) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id) def test_tgt_lang_setter(self): self.tokenizer.tgt_lang = "fr" self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE]) self.tokenizer.tgt_lang = "es" self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Testing suite for the PyTorch Speech2Text2 model. """
import unittest from transformers import Speech2Text2Config from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.speech_to_text_2.modeling_speech_to_text_2 import ( Speech2Text2Decoder, Speech2Text2ForCausalLM, ) @require_torch class Speech2Text2StandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = Speech2Text2Config( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = Speech2Text2Decoder(config=config).to(torch_device).eval() input_ids = input_ids[:2] input_ids[input_ids == 0] += 1 outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1 next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) print(next_input_ids) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, 
past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Speech2Text2StandaloneDecoderModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ): all_model_classes = (Speech2Text2Decoder, Speech2Text2ForCausalLM) if is_torch_available() else () all_generative_model_classes = (Speech2Text2ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": Speech2Text2ForCausalLM} if is_torch_available() else {} fx_compatible = True test_pruning = False def setUp( self, ): self.model_tester = Speech2Text2StandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=Speech2Text2Config) def test_inputs_embeds(self): pass @unittest.skip("This test is currently broken because of safetensors.") def test_tf_from_pt_safetensors(self): pass def test_save_load_fast_init_from_base(self): pass def test_save_load_fast_init_to_base(self): pass def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return
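# Illustrative sketch, not part of the test suite: the cache-equivalence property that
# create_and_check_decoder_model_past verifies, as a standalone snippet. The config values mirror
# the tester defaults above; the helper name and the input sizes are arbitrary illustrations.
def _demo_decoder_cache_equivalence():
    config = Speech2Text2Config(
        vocab_size=99, d_model=16, decoder_layers=2, decoder_ffn_dim=32, decoder_attention_heads=4, use_cache=True
    )
    decoder = Speech2Text2Decoder(config).eval()
    input_ids = torch.randint(1, config.vocab_size, (1, 6))
    with torch.no_grad():
        full = decoder(input_ids)  # full forward pass over all six tokens
        past = decoder(input_ids[:, :-1], use_cache=True).past_key_values
        step = decoder(input_ids[:, -1:], past_key_values=past)  # one new token plus the cache
    # Decoding one new token with past_key_values must reproduce the last position of the full pass.
    print(torch.allclose(full.last_hidden_state[:, -1], step.last_hidden_state[:, 0], atol=1e-3))  # expected: True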
# Copyright 2021 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
import inspect import json import os import tempfile import unittest from transformers.models.speech_to_text_2 import Speech2Text2Tokenizer from transformers.models.speech_to_text_2.tokenization_speech_to_text_2 import VOCAB_FILES_NAMES from ...test_tokenization_common import TokenizerTesterMixin class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = Speech2Text2Tokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = "<s> <pad> </s> <unk> here@@ a couple of@@ words for the he@@ re@@ vocab".split(" ") merges = ["he re</w> 123", "here a 1456"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "vocab") self.assertEqual(len(vocab_keys), 14) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 14) def test_tokenizer_decode(self): tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname) token_ids = [4, 6, 8, 7, 10] output_string = tokenizer.decode(token_ids) self.assertTrue(output_string == "herecouple words ofthe") def test_load_no_merges_file(self): tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname) with tempfile.TemporaryDirectory() as tmp_dirname: tokenizer.save_pretrained(tmp_dirname) os.remove(os.path.join(tmp_dirname, "merges.txt")) tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname) with tempfile.TemporaryDirectory() as tmp_dirname: tokenizer.save_pretrained(tmp_dirname) tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname) self.assertIsNotNone(tokenizer) def test_tokenizer_slow_store_full_signature(self): if not self.test_slow_tokenizer: return signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty and parameter_name != "merges_file": self.assertIn(parameter_name, tokenizer.init_kwargs)
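# Illustrative sketch, not part of the test suite: why merges.txt is optional for
# Speech2Text2Tokenizer, the behaviour test_load_no_merges_file asserts. With only a vocab file the
# tokenizer can still be loaded and used for decoding; the "@@" continuation markers are resolved
# from the vocab alone. The toy vocab is the one built in setUp; the helper name is hypothetical.
def _demo_decode_without_merges():
    vocab = "<s> <pad> </s> <unk> here@@ a couple of@@ words for the he@@ re@@ vocab".split(" ")
    with tempfile.TemporaryDirectory() as tmp_dir:
        with open(os.path.join(tmp_dir, VOCAB_FILES_NAMES["vocab_file"]), "w", encoding="utf-8") as fp:
            fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))))
        # No merges file is written; loading still succeeds and decoding works from the vocab.
        tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dir)
        print(tokenizer.decode([4, 6, 8, 7, 10]))  # "herecouple words ofthe", as in test_tokenizer_decode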
# coding=utf-8
# Copyright 2021-2023 HuggingFace Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Tests for the SpeechT5 feature extractors. """
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechT5FeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class SpeechT5FeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.return_attention_mask = return_attention_mask def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs def prepare_inputs_for_target(self, equal_length=False, numpify=False): if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)] else: speech_inputs = [ floats_list((x, self.num_mel_bins)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = SpeechT5FeatureExtractor def setUp(self): self.feat_extract_tester = SpeechT5FeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 
200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_zero_mean_unit_variance_normalization_np(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np") input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) lengths = range(800, 1400, 200) speech_inputs = [floats_list((1, x))[0] for x in lengths] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, max_length=max_length, padding=padding) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) self.assertTrue(input_values.shape == (3, 1000)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np" ) input_values = processed.input_values 
self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) self.assertTrue(input_values.shape == (3, 1200)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def test_call_target(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values self.assertTrue(input_values.ndim == 3) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins) encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batch_feature_target(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_batch_feature_target_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") 
batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_padding_accepts_tensors_target_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths) def test_attention_mask_with_truncation_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lengths) feat_extract.feature_size = feat_extract.num_mel_bins processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] ) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): EXPECTED_INPUT_VALUES = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(input_speech, return_tensors="pt").input_values 
self.assertEqual(input_values.shape, (1, 93680)) self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6)) def test_integration_target(self): EXPECTED_INPUT_VALUES = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 366, 80)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
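# Illustrative sketch, not part of the test suite: the two input paths the integration tests above
# cover. Plain audio is returned as a zero-mean/unit-variance waveform, while audio_target= yields
# 80-bin log-mel frames for the TTS decoder. A synthetic one-second signal stands in for the
# LibriSpeech sample used in the real tests; the helper name is hypothetical.
def _demo_speecht5_feature_extractor():
    feature_extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second of noise at 16 kHz
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(inputs.input_values.shape)  # (1, 16000): normalized waveform, one entry per sample
    targets = feature_extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    print(targets.input_values.shape)  # (1, num_frames, 80): one log-mel frame per 16 ms hop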
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full terms.
""" Testing suite for the PyTorch SpeechT5 model. """
SpeechT5SpeechDecoderPrenet
    def test_model_outputs_equivalence(self):
        pass

    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
    def test_save_load(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        pass

    @slow
    def test_torchscript_output_attentions(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    @slow
    def test_torchscript_output_hidden_state(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    @slow
    def test_torchscript_simple(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    # training is not supported yet
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)


@require_torch
@require_sentencepiece
@require_tokenizers
class SpeechT5ForTextToSpeechIntegrationTests(unittest.TestCase):
    @cached_property
    def default_model(self):
        return SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")

    @cached_property
    def default_processor(self):
        return SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

    @cached_property
    def default_vocoder(self):
        return SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

    def test_generation(self):
        model = self.default_model
        model.to(torch_device)
        processor = self.default_processor

        set_seed(555)  # make deterministic
        speaker_embeddings = torch.zeros((1, 512)).to(torch_device)

        input_text = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel"
        input_ids = processor(text=input_text, return_tensors="pt").input_ids.to(torch_device)

        generated_speech = model.generate_speech(input_ids, speaker_embeddings=speaker_embeddings)
        self.assertEqual(generated_speech.shape, (230, model.config.num_mel_bins))

        set_seed(555)  # make deterministic
        # test model.generate, same method as generate_speech but with additional kwargs to absorb kwargs such as attention_mask
        generated_speech_with_generate = model.generate(
            input_ids, attention_mask=None, speaker_embeddings=speaker_embeddings
        )
        self.assertEqual(generated_speech_with_generate.shape, (230, model.config.num_mel_bins))

    def test_batch_generation(self):
        model = self.default_model
        model.to(torch_device)
        processor = self.default_processor
        vocoder = self.default_vocoder
        set_seed(555)  # make deterministic

        input_text = [
            "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel",
            "nor is mister quilter's manner less interesting than his matter",
            "he tells us that at this festive season of the year with christmas and rosebeaf looming before us",
        ]
        inputs = processor(text=input_text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device)
        speaker_embeddings = torch.zeros((1, 512), device=torch_device)

        spectrograms, spectrogram_lengths = model.generate_speech(
            input_ids=inputs["input_ids"],
            speaker_embeddings=speaker_embeddings,
            attention_mask=inputs["attention_mask"],
            return_output_lengths=True,
        )
        self.assertEqual(spectrograms.shape, (3, 262, model.config.num_mel_bins))

        waveforms = vocoder(spectrograms)
        waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths]

        # check waveform results are the same with or without using vocoder
        set_seed(555)
        waveforms_with_vocoder, waveform_lengths_with_vocoder = model.generate_speech(
            input_ids=inputs["input_ids"],
            speaker_embeddings=speaker_embeddings,
            attention_mask=inputs["attention_mask"],
            vocoder=vocoder,
            return_output_lengths=True,
        )
        self.assertTrue(torch.allclose(waveforms, waveforms_with_vocoder, atol=1e-8))
        self.assertEqual(waveform_lengths, waveform_lengths_with_vocoder)

        # check waveform results are the same with return_output_lengths=True/False
        set_seed(555)
        waveforms_with_vocoder_no_lengths = model.generate_speech(
            input_ids=inputs["input_ids"],
            speaker_embeddings=speaker_embeddings,
            attention_mask=inputs["attention_mask"],
            vocoder=vocoder,
            return_output_lengths=False,
        )
        self.assertTrue(torch.allclose(waveforms_with_vocoder_no_lengths, waveforms_with_vocoder, atol=1e-8))

        # check results when batching are consistent with results without batching
        for i, text in enumerate(input_text):
            set_seed(555)  # make deterministic
            inputs = processor(text=text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device)

            spectrogram = model.generate_speech(
                input_ids=inputs["input_ids"],
                speaker_embeddings=speaker_embeddings,
            )
            self.assertEqual(spectrogram.shape, spectrograms[i][: spectrogram_lengths[i]].shape)
            self.assertTrue(torch.allclose(spectrogram, spectrograms[i][: spectrogram_lengths[i]], atol=5e-3))

            waveform = vocoder(spectrogram)
            self.assertEqual(waveform.shape, waveforms[i][: waveform_lengths[i]].shape)

            # check whether waveforms are the same with/without passing vocoder
            set_seed(555)
            waveform_with_vocoder = model.generate_speech(
                input_ids=inputs["input_ids"],
                speaker_embeddings=speaker_embeddings,
                vocoder=vocoder,
            )
            self.assertTrue(torch.allclose(waveform, waveform_with_vocoder, atol=1e-8))
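# Illustrative sketch (not part of the test suite): minimal text-to-speech usage mirroring the
# integration tests above. It assumes the same public checkpoints already used in this file
# ("microsoft/speecht5_tts" and "microsoft/speecht5_hifigan") and a zero speaker embedding in
# place of a real x-vector.
def _example_text_to_speech_usage():
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

    inputs = processor(text="hello world", return_tensors="pt")
    speaker_embeddings = torch.zeros((1, 512))  # a real speaker x-vector would normally go here
    # generate_speech returns a mel spectrogram; the vocoder turns it into a waveform
    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings=speaker_embeddings)
    waveform = vocoder(spectrogram)
    return waveform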
@require_torch
class SpeechT5ForSpeechToSpeechTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        encoder_seq_length=1024,  # speech is longer
        decoder_seq_length=1024,
        is_training=False,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=4,
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        vocab_size=81,
        num_mel_bins=20,
        reduction_factor=2,
        speech_decoder_postnet_layers=2,
        speech_decoder_postnet_units=32,
        speech_decoder_prenet_units=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.reduction_factor = reduction_factor
        self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
        self.speech_decoder_postnet_units = speech_decoder_postnet_units
        self.speech_decoder_prenet_units = speech_decoder_prenet_units

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length])
        decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0)
        decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length])

        config = self.get_config()
        inputs_dict = prepare_inputs_dict(
            config,
            input_values=input_values,
            decoder_input_values=decoder_input_values,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_config(self):
        return SpeechT5Config(
            hidden_size=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            vocab_size=self.vocab_size,
            num_mel_bins=self.num_mel_bins,
            reduction_factor=self.reduction_factor,
            speech_decoder_postnet_layers=self.speech_decoder_postnet_layers,
            speech_decoder_postnet_units=self.speech_decoder_postnet_units,
            speech_decoder_prenet_units=self.speech_decoder_prenet_units,
        )

    def create_and_check_model_forward(self, config, inputs_dict):
        model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval()

        input_values = inputs_dict["input_values"]
        attention_mask = inputs_dict["attention_mask"]
        decoder_input_values = inputs_dict["decoder_input_values"]

        result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values)
        self.parent.assertEqual(
            result.spectrogram.shape,
            (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins),
        )


@require_torch
class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else ()
    all_generative_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_resize_embeddings = False

    input_name = "input_values"

    def setUp(self):
        self.model_tester = SpeechT5ForSpeechToSpeechTester(self)
        self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_model_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_forward(*config_and_inputs)

    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
    def test_decoder_model_past_with_large_inputs(self):
        pass

    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
    def test_determinism(self):
        pass

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(
                encoder_seq_length
            )
            subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(
                encoder_key_length
            )

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
            )
            out_len = len(outputs)

            correct_outlen = 5

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
            )

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "input_values",
                "attention_mask",
                "decoder_input_values",
                "decoder_attention_mask",
            ]
            expected_arg_names.extend(
                ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"]
                if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names
                else ["encoder_outputs"]
            )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
            else:
                seq_length = self.model_tester.seq_length

            subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length)

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [subsampled_seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = [
                    "conv.weight",
                    "conv.parametrizations.weight",
                    "masked_spec_embed",
                    "feature_projection.projection.weight",
                    "feature_projection.projection.bias",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # this model has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # this model has no input embeddings
    def test_model_common_attributes(self):
        pass

    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
    def test_model_outputs_equivalence(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        pass

    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
    def test_save_load(self):
        pass

    @slow
    def test_torchscript_output_attentions(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    @slow
    def test_torchscript_output_hidden_state(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    @slow
    def test_torchscript_simple(self):
        # disabled because this model doesn't have decoder_input_ids
        pass

    # training is not supported yet
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)
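# Illustrative sketch (not part of the test suite): minimal speech-to-speech (voice conversion)
# usage mirroring the integration test below. It assumes the "microsoft/speecht5_vc" checkpoint
# already used in this file, a zero speaker embedding, and a 16 kHz mono waveform passed in as a
# 1-D float array.
def _example_voice_conversion_usage(waveform):
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
    model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")

    input_values = processor(audio=waveform, return_tensors="pt").input_values
    speaker_embeddings = torch.zeros((1, 512))  # a real speaker x-vector would normally go here
    # returns a mel spectrogram of the converted speech; a vocoder can then synthesize audio from it
    return model.generate_speech(input_values, speaker_embeddings=speaker_embeddings)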
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SpeechT5ForSpeechToSpeechIntegrationTests(unittest.TestCase):
    @cached_property
    def default_processor(self):
        return SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_generation_librispeech(self):
        model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
        model.to(torch_device)
        processor = self.default_processor

        input_speech = self._load_datasamples(1)
        input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device)

        speaker_embeddings = torch.zeros((1, 512), device=torch_device)
        generated_speech = model.generate_speech(input_values, speaker_embeddings=speaker_embeddings)

        self.assertEqual(generated_speech.shape[1], model.config.num_mel_bins)
        self.assertGreaterEqual(generated_speech.shape[0], 300)
        self.assertLessEqual(generated_speech.shape[0], 310)


class SpeechT5HifiGanTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        num_mel_bins=20,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.num_mel_bins = num_mel_bins

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0)
        config = self.get_config()
        return config, input_values

    def get_config(self):
        return SpeechT5HifiGanConfig(
            model_in_dim=self.num_mel_bins,
            upsample_initial_channel=32,
        )

    def create_and_check_model(self, config, input_values):
        model = SpeechT5HifiGan(config=config).to(torch_device).eval()
        result = model(input_values)
        self.parent.assertEqual(result.shape, (self.seq_length * 256,))

    def prepare_config_and_inputs_for_common(self):
        config, input_values = self.prepare_config_and_inputs()
        inputs_dict = {"spectrogram": input_values}
        return config, inputs_dict


@require_torch
class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (SpeechT5HifiGan,) if is_torch_available() else ()
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_resize_position_embeddings = False
    test_headmasking = False
    test_mismatched_shapes = False
    test_missing_keys = False
    test_model_parallel = False
    is_encoder_decoder = False
    has_attentions = False

    input_name = "spectrogram"

    def setUp(self):
        self.model_tester = SpeechT5HifiGanTester(self)
        self.config_tester = ConfigTester(self, config_class=SpeechT5HifiGanConfig)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["spectrogram"]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    # this model does not output hidden states
    def test_hidden_states_output(self):
        pass

    # skip
    def test_initialization(self):
        pass

    # this model has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # this model has no input embeddings
    def test_model_common_attributes(self):
        pass

    # skip as this model doesn't support all arguments tested
    def test_model_outputs_equivalence(self):
        pass

    # this model does not output hidden states
    def test_retain_grad_hidden_states_attentions(self):
        pass

    # skip because it fails on automapping of SpeechT5HifiGanConfig
    def test_save_load_fast_init_from_base(self):
        pass

    # skip because it fails on automapping of SpeechT5HifiGanConfig
    def test_save_load_fast_init_to_base(self):
        pass

    def test_batched_inputs_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            batched_inputs = inputs["spectrogram"].unsqueeze(0).repeat(2, 1, 1)
            with torch.no_grad():
                batched_outputs = model(batched_inputs.to(torch_device))

            self.assertEqual(
                batched_inputs.shape[0], batched_outputs.shape[0], msg="Got different batch dims for input and output"
            )

    def test_unbatched_inputs_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(inputs["spectrogram"].to(torch_device))
            self.assertTrue(outputs.dim() == 1, msg="Got un-batched inputs but batched output")
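# Illustrative sketch (not part of the test suite): the HiFi-GAN vocoder maps a mel spectrogram of
# shape (sequence_length, num_mel_bins) -- or a batch of them -- to a waveform, as exercised by the
# batched/un-batched tests above. The config values mirror those used by SpeechT5HifiGanTester.
def _example_vocoder_usage():
    vocoder = SpeechT5HifiGan(SpeechT5HifiGanConfig(model_in_dim=20, upsample_initial_channel=32))
    spectrogram = torch.randn(7, 20)  # (sequence_length, num_mel_bins), matching model_in_dim
    with torch.no_grad():
        waveform = vocoder(spectrogram)
    return waveform  # 1-D tensor of audio samples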
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SpeechT5 model. """
import copy import inspect import tempfile import unittest from transformers import SpeechT5Config, SpeechT5HifiGanConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5Processor, ) def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, decoder_input_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} if decoder_input_ids is not None: decoder_dict = {"decoder_input_ids": decoder_input_ids} else: decoder_dict = {"decoder_input_values": decoder_input_values} if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { **encoder_dict, **decoder_dict, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class SpeechT5ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, vocab_size=81, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, 
decoder_ffn_dim=self.intermediate_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5Model(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) @require_torch class SpeechT5ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5Model,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": SpeechT5ForSpeechToText, "feature-extraction": SpeechT5Model} if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_inputs_embeds(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): pass @slow def test_torchscript_output_attentions(self): pass @slow def test_torchscript_output_hidden_state(self): pass @slow def test_torchscript_simple(self): pass @require_torch class SpeechT5ForSpeechToTextTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, decoder_seq_length=7, is_training=False, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_ids = 
ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size).clamp(2) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_ids = inputs_dict["decoder_input_ids"] result = model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.decoder_seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) @require_torch class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToTextTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, 
model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 
1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_inputs_embeds(self): pass def test_resize_embeddings_untied(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) if model.get_output_embeddings() is None: continue model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) def test_resize_tokens_embeddings(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_retain_grad_hidden_states_attentions(self): pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: 
module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForSpeechToTextIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device) generated_ids = model.generate(input_values) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(audio=input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) generated_ids = model.generate(input_values, attention_mask=attention_mask) generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister quilter's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and rosebeaf looming before us" " similars drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick latin's work is really greek after all and can discover in it" " but little of rocky ithica", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS) @require_torch class SpeechT5ForTextToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=7, decoder_seq_length=1024, is_training=False, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, vocab_size=81, num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = 
reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_ids=input_ids, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_ids, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_ids" def setUp(self): self.model_tester = SpeechT5ForTextToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): pass def test_determinism(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = 
inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_inputs_embeds(self): pass def test_model_outputs_equivalence(self): pass def test_save_load(self): pass def test_retain_grad_hidden_states_attentions(self): pass @slow def test_torchscript_output_attentions(self): pass @slow def test_torchscript_output_hidden_state(self): pass @slow def test_torchscript_simple(self): pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers class SpeechT5ForTextToSpeechIntegrationTests(unittest.TestCase): @cached_property def default_model(self): return SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") @cached_property def default_vocoder(self): return SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") def test_generation(self): model = self.default_model model.to(torch_device) processor = self.default_processor set_seed(555) speaker_embeddings = torch.zeros((1, 512)).to(torch_device) input_text = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" input_ids = processor(text=input_text, return_tensors="pt").input_ids.to(torch_device) generated_speech = model.generate_speech(input_ids, speaker_embeddings=speaker_embeddings) self.assertEqual(generated_speech.shape, (230, model.config.num_mel_bins)) set_seed(555) generated_speech_with_generate = model.generate( input_ids, 
attention_mask=None, speaker_embeddings=speaker_embeddings ) self.assertEqual(generated_speech_with_generate.shape, (230, model.config.num_mel_bins)) def test_batch_generation(self): model = self.default_model model.to(torch_device) processor = self.default_processor vocoder = self.default_vocoder set_seed(555) input_text = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister quilter's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and rosebeaf looming before us", ] inputs = processor(text=input_text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) speaker_embeddings = torch.zeros((1, 512), device=torch_device) spectrograms, spectrogram_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], return_output_lengths=True, ) self.assertEqual(spectrograms.shape, (3, 262, model.config.num_mel_bins)) waveforms = vocoder(spectrograms) waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths] set_seed(555) waveforms_with_vocoder, waveform_lengths_with_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=True, ) self.assertTrue(torch.allclose(waveforms, waveforms_with_vocoder, atol=1e-8)) self.assertEqual(waveform_lengths, waveform_lengths_with_vocoder) set_seed(555) waveforms_with_vocoder_no_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=False, ) self.assertTrue(torch.allclose(waveforms_with_vocoder_no_lengths, waveforms_with_vocoder, atol=1e-8)) for i, text in enumerate(input_text): set_seed(555) inputs = processor(text=text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) spectrogram = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, ) self.assertEqual(spectrogram.shape, spectrograms[i][: spectrogram_lengths[i]].shape) self.assertTrue(torch.allclose(spectrogram, spectrograms[i][: spectrogram_lengths[i]], atol=5e-3)) waveform = vocoder(spectrogram) self.assertEqual(waveform.shape, waveforms[i][: waveform_lengths[i]].shape) set_seed(555) waveform_with_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder, ) self.assertTrue(torch.allclose(waveform, waveform_with_vocoder, atol=1e-8)) @require_torch class SpeechT5ForSpeechToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, decoder_seq_length=1024, is_training=False, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size 
= intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as 
tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): pass def test_determinism(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
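# Note: the expected attention shapes checked below use the subsampled lengths returned by the
# speech encoder prenet's _get_feat_extract_output_lengths, since the convolutional feature
# extractor shortens the input sequence before self-attention is applied.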
self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_inputs_embeds(self): pass def test_model_common_attributes(self): pass def test_model_outputs_equivalence(self): pass def 
test_retain_grad_hidden_states_attentions(self): pass def test_save_load(self): pass @slow def test_torchscript_output_attentions(self): pass @slow def test_torchscript_output_hidden_state(self): pass @slow def test_torchscript_simple(self): pass def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForSpeechToSpeechIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_vc") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device) speaker_embeddings = torch.zeros((1, 512), device=torch_device) generated_speech = model.generate_speech(input_values, speaker_embeddings=speaker_embeddings) self.assertEqual(generated_speech.shape[1], model.config.num_mel_bins) self.assertGreaterEqual(generated_speech.shape[0], 300) self.assertLessEqual(generated_speech.shape[0], 310) class SpeechT5HifiGanTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, num_mel_bins=20, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.num_mel_bins = num_mel_bins def prepare_config_and_inputs(self): input_values = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0) config = self.get_config() return config, input_values def get_config(self): return SpeechT5HifiGanConfig( model_in_dim=self.num_mel_bins, upsample_initial_channel=32, ) def create_and_check_model(self, config, input_values): model = SpeechT5HifiGan(config=config).to(torch_device).eval() result = model(input_values) self.parent.assertEqual(result.shape, (self.seq_length * 256,)) def prepare_config_and_inputs_for_common(self): config, input_values = self.prepare_config_and_inputs() inputs_dict = {"spectrogram": input_values} return config, inputs_dict @require_torch class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5HifiGan,) if 
is_torch_available() else () test_torchscript = False test_pruning = False test_resize_embeddings = False test_resize_position_embeddings = False test_head_masking = False test_mismatched_shapes = False test_missing_keys = False test_model_parallel = False is_encoder_decoder = False has_attentions = False input_name = "spectrogram" def setUp(self): self.model_tester = SpeechT5HifiGanTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5HifiGanConfig) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "spectrogram", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): pass def test_initialization(self): pass def test_inputs_embeds(self): pass def test_model_common_attributes(self): pass def test_model_outputs_equivalence(self): pass def test_retain_grad_hidden_states_attentions(self): pass def test_save_load_fast_init_from_base(self): pass def test_save_load_fast_init_to_base(self): pass def test_batched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() batched_inputs = inputs["spectrogram"].unsqueeze(0).repeat(2, 1, 1) with torch.no_grad(): batched_outputs = model(batched_inputs.to(torch_device)) self.assertEqual( batched_inputs.shape[0], batched_outputs.shape[0], msg="Got different batch dims for input and output" ) def test_unbatched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(inputs["spectrogram"].to(torch_device)) self.assertTrue(outputs.dim() == 1, msg="Got un-batched inputs but batched output")
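# Hedged usage sketch (not part of the test suite): end-to-end text-to-speech with the classes
# exercised by the integration tests above. The checkpoint names and the zero speaker embedding
# mirror those tests; a real application would supply real speaker embeddings (e.g. x-vectors).
import torch

from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

tts_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
tts_vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

tts_inputs = tts_processor(text="mister quilter is the apostle of the middle classes", return_tensors="pt")
speaker_embeddings = torch.zeros((1, 512))  # placeholder embedding, as in the tests above
# generate_speech returns a mel spectrogram; passing vocoder= returns the synthesized waveform instead
waveform = tts_model.generate_speech(
    tts_inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=tts_vocoder
)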
import json import os import shutil import tempfile import unittest from transformers import is_speech_available, is_torch_available from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_torch from transformers.utils import FEATURE_EXTRACTOR_NAME if is_speech_available() and is_torch_available(): from transformers import SpeechT5FeatureExtractor, SpeechT5Processor from .test_feature_extraction_speecht5 import floats_list SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_torch class SpeechT5ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "do_normalize": False, "num_mel_bins": 80, "hop_length": 16, "win_length": 64, "win_function": "hann_window", "fmin": 80, "fmax": 7600, "mel_floor": 1e-10, "reduction_factor": 2, "return_attention_mask": True, } self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") def get_tokenizer(self, **kwargs): return SpeechT5Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return SpeechT5FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SpeechT5Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SpeechT5Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SpeechT5Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np") input_processor = processor(audio=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), 
delta=1e-2) def test_feature_extractor_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio_target=raw_speech, return_tensors="np") input_processor = processor(audio_target=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text_target=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
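# Hedged sketch of the behaviour verified above: SpeechT5Processor routes audio/audio_target
# keywords to the wrapped feature extractor and text/text_target to the tokenizer, and
# batch_decode delegates to the tokenizer. The ASR checkpoint name matches the one used in the
# integration tests above.
import numpy as np

from transformers import SpeechT5Processor

asr_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
raw_speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
speech_inputs = asr_processor(audio=raw_speech, sampling_rate=16000, return_tensors="pt")  # -> input_values
text_inputs = asr_processor(text="this is a test string", return_tensors="pt")  # -> input_ids
decoded = asr_processor.batch_decode(text_inputs["input_ids"], skip_special_tokens=True)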
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = SpeechT5Tokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) mask_token = AddedToken("<mask>", lstrip=True, rstrip=False) tokenizer.mask_token = mask_token tokenizer.add_special_tokens({"mask_token": mask_token}) tokenizer.add_tokens(["<ctc_blank>"]) tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def get_numeric_input_output_texts(self): input_text = "I have $123.45 and owe €59.78. My balance is -₴876.90 and have 73% stocks in my company which equals to ₦72649201" output_text = "I have one hundred and twenty three point four five dollars and owe fifty nine point seven eight euros. My balance is minus eight hundred and seventy six point nine zero ukrainian hryvnia and have seventy three percent stocks in my company which equals to seventy two million six hundred and forty nine thousand two hundred and one nigerian naira" return input_text, output_text def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_tokenizer_normalization(self): tokenizer = self.get_tokenizer(normalize=True) input_text, expected_text = self.get_numeric_input_output_texts() input_ids = tokenizer.encode(input_text) output_text = tokenizer.decode(input_ids, skip_special_tokens=True) self.assertEqual(output_text, expected_text) def test_convert_token_and_id(self): token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-4], "œ") self.assertEqual(vocab_keys[-2], "<mask>") self.assertEqual(vocab_keys[-1], "<ctc_blank>") self.assertEqual(len(vocab_keys), 81) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 79) def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", 
add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) def test_pickle_subword_regularization_tokenizer(self): pass def test_subword_regularization_tokenizer(self): pass def test_full_tokenizer(self): tokenizer = self.get_tokenizer(normalize=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual(tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 9, 10, 9, 5, 6, 22, 4, 6, 20, 8, 4, 6, 11, 8, 16, 12, 7, 9, 14, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual(back_tokens,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, 'n', 'i', 'n', 'e', 't', 'y', SPIECE_UNDERLINE, 't', 'w', 'o', SPIECE_UNDERLINE, 't', 'h', 'o', 'u', 's', 'a', 'n', 'd', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.']) @slow def test_tokenizer_integration(self): sequences = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] expected_encoding = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences, )
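# Hedged sketch of the tokenizer behaviour covered above: SpeechT5Tokenizer is a character-level
# SentencePiece tokenizer, and constructing it with normalize=True (as get_tokenizer(normalize=True)
# does in the tests) spells out numbers and currency symbols before tokenizing.
from transformers import SpeechT5Tokenizer

asr_tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr", normalize=True)
print(asr_tokenizer.tokenize("this is a test"))  # per-character pieces such as '▁', 't', 'h', ...
ids = asr_tokenizer("I have $123.45")["input_ids"]
print(asr_tokenizer.decode(ids, skip_special_tokens=True))  # numbers and '$' rendered as words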
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Splinter model. """
# Notes carried over from the original file's comments:
# - TODO: fix the failed pipeline tests (skipped in is_pipeline_test_to_skip) when this model gets
#   more usage.
# - test_inputs_embeds expects a TypeError for SplinterForPreTraining because question_positions
#   must not be None in that case.
# - test_multi_gpu_data_parallel_forward is overwritten from the common tests: SplinterForPreTraining
#   can receive a different number of question tokens per input, so when the batch is scattered to
#   multiple devices each replica may see a different maximal number of question tokens (see
#   SplinterForPreTraining._prepare_question_positions) and the outputs differ along dimension 1
#   (num_questions), which prevents gathering them into a single tensor.
# - The original code uses 0 to denote padded question tokens and their start/end positions; the
#   config's pad_token_id is used as the loss's ignore_index in SplinterForPreTraining, and
#   test_splinter_pretraining_loss_with_padding asserts pad_token_id == 0 so that anybody changing
#   the config default is aware of the implication.
# - The integration tests encode inputs such as "[CLS] Brad was born in [QUESTION] . He returned to
#   the United Kingdom later . [SEP]", with expected answer spans "Brad" and "the United Kingdom".
import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, 
start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": SplinterModel, "question-answering": SplinterForQuestionAnswering} if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests": return True elif pipeline_test_casse_name == "FeatureExtractionPipelineTests" and tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if issubclass(model_class, SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, 
dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = 
SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, 
question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
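# Illustrative helper (an assumed sketch, not the library's internal
# SplinterForPreTraining._prepare_question_positions): test_splinter_pretraining_prepare_question_positions
# above asserts that omitting `question_positions` yields the same logits as passing the
# right-padded positions of the [QUESTION] token (id 104, i.e. config.question_token_id). Those
# positions can be derived from the input ids roughly as follows:
def _gather_question_positions(input_ids, question_token_id=104, pad_value=0):
    import torch

    batch_size = input_ids.shape[0]
    # row/column indices of every [QUESTION] token in the batch
    rows, cols = torch.where(input_ids == question_token_id)
    max_questions = int(torch.bincount(rows, minlength=batch_size).max())
    positions = torch.full((batch_size, max_questions), pad_value, dtype=torch.long)
    for row in range(batch_size):
        row_cols = cols[rows == row]
        positions[row, : row_cols.numel()] = row_cols
    return positions
# For the batch used in that test this reproduces torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]]).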
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class SqueezeBertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.q_groups = q_groups self.k_groups = k_groups self.v_groups = v_groups self.post_attention_groups = post_attention_groups self.intermediate_groups = intermediate_groups self.output_groups = output_groups def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return SqueezeBertConfig( embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, 
post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, ) def create_and_check_squeezebert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_squeezebert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_squeezebert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = SqueezeBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_squeezebert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = SqueezeBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_squeezebert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = SqueezeBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_squeezebert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = SqueezeBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if 
is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = SqueezeBertModelTester(self) self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_squeezebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SqueezeBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_sentencepiece @require_tokenizers @require_torch class SqueezeBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_classification_head(self): model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli") input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 3)) self.assertEqual(output.shape, expected_shape) expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]]) self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import SqueezeBertTokenizer, SqueezeBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class SqueezeBertTokenizationTest(BertTokenizationTest): tokenizer_class = SqueezeBertTokenizer rust_tokenizer_class = SqueezeBertTokenizerFast test_rust_tokenizer = True def get_rust_tokenizer(self, **kwargs): return SqueezeBertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) @slow def test_sequence_builders(self): tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli-headless") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ]
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SwiftFormer model. """
# Notes carried over from the original file's comments:
# - Some tests from test_modeling_common.py are overwritten below, since SwiftFormer does not use
#   input_ids, inputs_embeds, attention_mask and seq_length.
# - TODO: SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width), with the
#   width and height being successively divided by 2 after every 2 blocks.
import copy import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SwiftFormerModelTester: def __init__( self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=3, layer_depths=[1, 1, 1, 1], embed_dims=[16, 16, 32, 32], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_labels = num_labels self.image_size = image_size self.layer_depths = layer_depths self.embed_dims = embed_dims def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwiftFormerConfig( depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, ) def create_and_check_model(self, config, pixel_values, labels): model = SwiftFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): (config, pixel_values, labels) = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = 
False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = SwiftFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SwiftFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="SwiftFormer does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 8 self.assertEqual(len(hidden_states), expected_num_stages) for i in range(len(hidden_states)): self.assertEqual( hidden_states[i].shape, torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ), ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class SwiftFormerModelIntegrationTest(unittest.TestCase): @cached_property def 
default_image_processor(self): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
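The assertions in test_hidden_states_output above encode SwiftFormer's four-stage layout: eight reported hidden states (two per embed_dims entry), with the channel count taken from embed_dims[i // 2] and the spatial size reduced by the initial 4x patch embedding and then halved per stage. A minimal standalone sketch of that arithmetic, using the SwiftFormerModelTester defaults from this file (toy values taken from the tester above, not from a pretrained checkpoint):

# Standalone sketch (plain Python, no transformers import needed): reproduces the
# hidden-state shape arithmetic asserted in test_hidden_states_output above.
embed_dims = [16, 16, 32, 32]   # SwiftFormerModelTester default
image_size = 224                # SwiftFormerModelTester default
expected_num_stages = 8         # the test expects two hidden states per embed_dims entry

for i in range(expected_num_stages):
    channels = embed_dims[i // 2]
    spatial = (image_size // 4) // 2 ** (i // 2)
    print(f"hidden_states[{i}]: (batch_size, {channels}, {spatial}, {spatial})")
# hidden_states[0]/[1]: (batch_size, 16, 56, 56)
# hidden_states[2]/[3]: (batch_size, 16, 28, 28)
# hidden_states[4]/[5]: (batch_size, 32, 14, 14)
# hidden_states[6]/[7]: (batch_size, 32, 7, 7)   -> matches create_and_check_model's (batch, embed_dims[-1], 7, 7)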
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Swin model. """
import collections import unittest from transformers import SwinConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel from transformers.models.swin.modeling_swin import SWIN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = SwinModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** 
(len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_backbone(self, config, pixel_values, labels): model = SwinBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16]) self.parent.assertEqual(len(model.channels), len(config.out_features)) config.out_features = None model = SwinBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4]) self.parent.assertEqual(len(model.channels), 1) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = SwinForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = SwinForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = SwinForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = SwinForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SwinModel, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": SwinModel, "image-classification": SwinForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = SwinModelTester(self) self.config_tester = ConfigTester(self, config_class=SwinConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def 
create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass def test_training_gradient_checkpointing(self): super().test_training_gradient_checkpointing() def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin Transformer does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 if model_class.__name__ == "SwinBackbone" else 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( 
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) if not model_class.__name__ == "SwinBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SwinModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_vision @require_torch class SwinModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = 
SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.0948, -0.6454, -0.0921]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @require_torch class SwinBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (SwinBackbone,) if is_torch_available() else () config_class = SwinConfig def setUp(self): self.model_tester = SwinModelTester(self)
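SwinModelTester.create_and_check_model above derives the expected encoder output shape from the configuration: Swin merges 2x2 patches between stages, so the token count shrinks by a factor of 4 and the channel dimension doubles at each of the len(depths) - 1 merging steps. A small standalone sketch of that arithmetic with the tester defaults used in this file:

# Standalone sketch of the expected_seq_len / expected_dim computation in create_and_check_model,
# using the SwinModelTester defaults above (toy values, not a real checkpoint configuration).
image_size, patch_size, embed_dim = 32, 2, 16
depths = [1, 2, 1]

num_patches = (image_size // patch_size) ** 2        # 16 * 16 = 256 tokens after patch embedding
num_merges = len(depths) - 1                         # 2 patch-merging steps between the 3 stages
expected_seq_len = num_patches // (4 ** num_merges)  # 256 // 16 = 16
expected_dim = int(embed_dim * 2 ** num_merges)      # 16 * 4 = 64

print(expected_seq_len, expected_dim)  # -> 16 64, the shape checked against last_hidden_state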
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TF 2.0 Swin model. """
from __future__ import annotations import inspect import unittest import numpy as np from transformers import SwinConfig from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.swin.modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, ) if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFSwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ) -> None: self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = TFSwinModel(config=config) result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, 
labels): model = TFSwinForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = TFSwinForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFSwinForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = TFSwinForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFSwinModel, TFSwinForImageClassification, TFSwinForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFSwinModel, "image-classification": TFSwinForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFSwinModelTester(self) self.config_tester = ConfigTester(self, config_class=SwinConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) 
signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_size = to_2tuple(config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = tf.reshape(reshaped_hidden_states[0], (batch_size, num_channels, height * width)) reshaped_hidden_states = tf.transpose(reshaped_hidden_states, (0, 2, 1)) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = to_2tuple(self.model_tester.image_size) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_inputs_requiring_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = 
to_2tuple(self.model_tester.image_size) patch_size = to_2tuple(config.patch_size) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFSwinModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_tf class TFSwinModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFSwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224") image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="tf") outputs = model(inputs) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0948, -0.6454, -0.0921]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
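test_inputs_requiring_padding above (like test_hidden_states_output_with_padding in the PyTorch file) overrides patch_size to 3 so that the 32-pixel test images no longer divide evenly into patches, then hands check_hidden_states_output the padded resolution computed with the formula below. A standalone sketch mirroring the tester defaults; note that, as written, the formula adds a full extra patch when the size is already divisible:

# Standalone sketch of the padding arithmetic used by test_inputs_requiring_padding above.
image_size = (32, 32)  # TFSwinModelTester default, as (height, width)
patch_size = (3, 3)    # override applied inside the test

padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])  # 32 + 3 - 2 = 33
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])   # 33

assert padded_height % patch_size[0] == 0 and padded_width % patch_size[1] == 0
print(padded_height, padded_width)  # -> 33 33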
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note: Swin2SRImageProcessor does not support batched input, so each test_call_* case below
# encodes a single image at a time.
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor from transformers.image_transforms import get_image_size class Swin2SRImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size def prepare_image_processor_dict(self): return { "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "pad_size": self.pad_size, } def expected_output_image_shape(self, images): img = images[0] if isinstance(img, Image.Image): input_width, input_height = img.size else: input_height, input_width = img.shape[-2:] pad_height = (input_height // self.pad_size + 1) * self.pad_size - input_height pad_width = (input_width // self.pad_size + 1) * self.pad_size - input_width return self.num_channels, input_height + pad_height, input_width + pad_width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Swin2SRImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Swin2SRImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = Swin2SRImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_rescale")) self.assertTrue(hasattr(image_processor, "rescale_factor")) self.assertTrue(hasattr(image_processor, "do_pad")) self.assertTrue(hasattr(image_processor, "pad_size")) def calculate_expected_size(self, image): old_height, old_width = get_image_size(image) size = self.image_processor_tester.pad_size pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return old_height + pad_height, old_width + pad_width def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) 
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) def test_call_numpy_4_channels(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing( image_inputs[0], return_tensors="pt", input_data_format="channels_first" ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Swin2SR model. """
import unittest from transformers import Swin2SRConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Swin2SRForImageSuperResolution, Swin2SRModel from transformers.models.swin2sr.modeling_swin2sr import SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor class Swin2SRModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=1, num_channels=3, num_channels_out=1, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=False, upscale=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_channels_out = num_channels_out self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.upscale = upscale self.num_hidden_layers = len(depths) self.hidden_size = embed_dim self.seq_length = (image_size // patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Swin2SRConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_channels_out=self.num_channels_out, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, upscale=self.upscale, ) def create_and_check_model(self, config, pixel_values, labels): model = Swin2SRModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.embed_dim, self.image_size, self.image_size) ) def create_and_check_for_image_super_resolution(self, config, pixel_values, labels): model = Swin2SRForImageSuperResolution(config) 
model.to(torch_device) model.eval() result = model(pixel_values) expected_image_size = self.image_size * self.upscale self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels_out, expected_image_size, expected_image_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Swin2SRModelTester(self) self.config_tester = ConfigTester(self, config_class=Swin2SRConfig, embed_dim=37) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_super_resolution(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Swin2SR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @slow def test_model_from_pretrained(self): for model_name in SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Swin2SRModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "logit_scale" in name: continue 
if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) @require_vision @require_torch @slow class Swin2SRModelIntegrationTest(unittest.TestCase): def test_inference_image_super_resolution_head(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64").to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5458, 0.5546, 0.5638], [0.5526, 0.5565, 0.5651], [0.5396, 0.5426, 0.5621]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4))
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SwinV2 model. """
import collections import unittest from transformers import Swinv2Config from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class Swinv2ModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Swinv2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = Swinv2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def 
create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = Swinv2ForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = Swinv2ForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = Swinv2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Swinv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) del inputs_dict["output_attentions"] 
config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = 
image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Swinv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_vision @require_torch class Swinv2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to( torch_device ) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# coding=utf-8
# Copyright 2022 Google SwitchTransformers Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SwitchTransformers model. """


    def create_and_check_generate_with_past_key_values(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = (
            SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8").to(torch_device).eval()
        )
        torch.manual_seed(0)
        output_without_past_cache = model.generate(
            input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
        )
        torch.manual_seed(0)
        output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
        self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))

    def create_and_check_model_fp16_forward(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = SwitchTransformersModel(config=config).to(torch_device).half().eval()
        output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())

    def create_and_check_encoder_decoder_shared_weights(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        for model_class in [SwitchTransformersModel, SwitchTransformersForConditionalGeneration]:
            torch.manual_seed(0)
            model = model_class(config=config).to(torch_device).eval()
            # load state dict copies weights but does not tie them
            model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)

            torch.manual_seed(0)
            tied_config = copy.deepcopy(config)
            tied_config.tie_encoder_decoder = True
            tied_model = model_class(config=tied_config).to(torch_device).eval()

            model_result = model(
                input_ids=input_ids,
                decoder_input_ids=decoder_input_ids,
                attention_mask=attention_mask,
                decoder_attention_mask=decoder_attention_mask,
            )

            tied_model_result = tied_model(
                input_ids=input_ids,
                decoder_input_ids=decoder_input_ids,
                attention_mask=attention_mask,
                decoder_attention_mask=decoder_attention_mask,
            )

            # check that models has less parameters
            self.parent.assertLess(
                sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
            )
            random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()

            # check that outputs are equal
            self.parent.assertTrue(
                torch.allclose(
                    model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
                )
            )

            # check that outputs after saving and loading are equal
            with tempfile.TemporaryDirectory() as tmpdirname:
                tied_model.save_pretrained(tmpdirname)
                tied_model = model_class.from_pretrained(tmpdirname)
                tied_model.to(torch_device)
                tied_model.eval()

                # check that models has less parameters
                self.parent.assertLess(
                    sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
                )
                random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()

                tied_model_result = tied_model(
                    input_ids=input_ids,
                    decoder_input_ids=decoder_input_ids,
                    attention_mask=attention_mask,
                    decoder_attention_mask=decoder_attention_mask,
                )

                # check that outputs are equal
                self.parent.assertTrue(
                    torch.allclose(
                        model_result[0][0, :, random_slice_idx],
                        tied_model_result[0][0, :, random_slice_idx],
                        atol=1e-4,
                    )
                )

    def check_resize_embeddings_switch_transformers_v1_1(
        self,
        config,
    ):
        prev_vocab_size = config.vocab_size

        config.tie_word_embeddings = False
        model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval()
        model.resize_token_embeddings(prev_vocab_size - 10)

        self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10)
        self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10)
        self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
            "use_cache": False,
            "output_router_logits": False,
        }
        return config, inputs_dict


@require_torch
class SwitchTransformersModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwitchTransformersModel, SwitchTransformersForConditionalGeneration) if is_torch_available() else ()
    )
    all_generative_model_classes = (SwitchTransformersForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": SwitchTransformersForConditionalGeneration,
            "feature-extraction": SwitchTransformersModel,
            "summarization": SwitchTransformersForConditionalGeneration,
            "text2text-generation": SwitchTransformersForConditionalGeneration,
            "translation": SwitchTransformersForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = True
    test_model_parallel = False
    is_encoder_decoder = True
    test_torchscript = False
    # The small SWITCH_TRANSFORMERS model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = SwitchTransformersModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_shift_right(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_v1_1(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # check that gated gelu feed forward and different word embeddings work
        config = config_and_inputs[0]
        config.tie_word_embeddings = False
        config.feed_forward_proj = "gated-gelu"
        self.model_tester.create_and_check_model(config, *config_and_inputs[1:])

    def test_config_and_model_silu_gated(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        config.feed_forward_proj = "gated-silu"
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_with_lm_head(*config_and_inputs)

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_decoder_model_past_with_3d_attn_mask(self):
        (
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        attention_mask = ids_tensor(
            [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],
            vocab_size=2,
        )
        decoder_attention_mask = ids_tensor(
            [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length],
            vocab_size=2,
        )

        self.model_tester.create_and_check_decoder_model_attention_mask_past(
            config,
            input_ids,
            decoder_input_ids,
            attention_mask,
            decoder_attention_mask,
            lm_labels,
        )

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_generate_with_past_key_values(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)

    def test_encoder_decoder_shared_weights(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_v1_1_resize_embeddings(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        self.model_tester.check_resize_embeddings_switch_transformers_v1_1(config)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwitchTransformersModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = SwitchTransformersModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/switch_transformers_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        max_length = config_and_inputs[1].shape[-1] + 3
        model = SwitchTransformersForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from SWITCH_TRANSFORMERS model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1],
                num_beams=1,
                max_length=max_length,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


class SwitchTransformersEncoderOnlyModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
for common tests useattentionmasktrue hiddensize32 numhiddenlayers2 numattentionheads4 dff37 relativeattentionnumbuckets8 istrainingfalse dropoutrate0 1 initializerfactor0 002 isencoderdecoderfalse eostokenid1 padtokenid0 scopenone self parent parent self batchsize batchsize self encoderseqlength encoderseqlength for common tests self seqlength self encoderseqlength self useattentionmask useattentionmask self vocabsize vocabsize self hiddensize hiddensize self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self dff dff self relativeattentionnumbuckets relativeattentionnumbuckets self dropoutrate dropoutrate self initializerfactor initializerfactor self eostokenid eostokenid self padtokenid padtokenid self isencoderdecoder isencoderdecoder self scope none self istraining istraining def getlargemodelconfigself return switchtransformersconfig frompretrainedswitchbase8 def prepareconfigandinputsself inputids idstensorself batchsize self encoderseqlength self vocabsize attentionmask none if self useattentionmask attentionmask idstensorself batchsize self encoderseqlength vocabsize2 config switchtransformersconfig vocabsizeself vocabsize dmodelself hiddensize dffself dff dkvself hiddensize self numattentionheads numlayersself numhiddenlayers numheadsself numattentionheads relativeattentionnumbucketsself relativeattentionnumbuckets dropoutrateself dropoutrate initializerfactorself initializerfactor eostokenidself eostokenid bostokenidself padtokenid padtokenidself padtokenid isencoderdecoderself isencoderdecoder return config inputids attentionmask def createandcheckmodelself config inputids attentionmask model switchtransformersencodermodelconfigconfig model totorchdevice model eval result model inputidsinputids attentionmaskattentionmask result modelinputidsinputids encoderoutput result lasthiddenstate self parent assertequalencoderoutput size self batchsize self encoderseqlength self hiddensize def createandcheckmodelfp16forwardself config inputids attentionmask model switchtransformersencodermodelconfigconfig totorchdevice half eval output modelinputids attentionmaskattentionmasklasthiddenstate self parent assertfalsetorch isnanoutput any item def prepareconfigandinputsforcommonself configandinputs self prepareconfigandinputs config inputids attentionmask configandinputs inputsdict inputids inputids attentionmask attentionmask return config inputsdict class switchtransformersencoderonlymodeltestmodeltestermixin unittest testcase allmodelclasses switchtransformersencodermodel if istorchavailable else testpruning false testresizeembeddings false testmodelparallel false testtorchscript false def setupself self modeltester switchtransformersencoderonlymodeltesterself self configtester configtesterself configclassswitchtransformersconfig dmodel37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs unittest skipiftorchdevice cpu cant do half precision def testmodelfp16forwardself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelfp16forwardconfigandinputs def usetaskspecificparamsmodel task model config updatemodel config taskspecificparamstask requiretorch class testasymmetricswitchtransformersunittest testcase def buildmodelandcheckforwardpassself kwargs tester switchtransformersmodeltesterself kwargs config inputs tester prepareconfigandinputs inputids decoderinputids attentionmask decoderattentionmask 
lmlabels inputs model switchtransformersforconditionalgenerationconfigconfig totorchdevice eval outputs model inputidsinputids decoderinputidsdecoderinputids decoderattentionmaskdecoderattentionmask labelslmlabels outputrouterlogitsfalse outputs modelinputs assert lenoutputs 4 assert outputslogits size tester batchsize tester decoderseqlength tester vocabsize assert outputsloss size return model def testsmalldecoderself numhiddenlayers is passed to switchtransformersconfig as numlayers model self buildmodelandcheckforwardpassdecoderlayers1 numhiddenlayers2 assert lenmodel encoder block 2 assert lenmodel decoder block 1 def testdefaultingtosymmetryself numhiddenlayers is passed to switchtransformersconfig as numlayers model self buildmodelandcheckforwardpassnumhiddenlayers2 assert lenmodel decoder block lenmodel encoder block 2 requiretorch class switchtransformerroutertestunittest testcase r switch transformers has different blocks from classic transformer based models the swift mlp contains a router class that has to be tested to check if it is correctly implemented original implementation of the routers here routerprobs torch tensor 0 35490513 0 60419905 0 4275843 0 23061597 0 32985854 0 43953657 0 25099766 0 27730572 0 7678207 0 71474564 expertindices torch tensor0 1 1 0 0 totorch int32 loss loadbalancinglossfuncrouterprobs expertindices self assertalmostequalloss item 0 8741045 places5 def testequivalencyrouterzlossself r this test checks if the router z loss is correctly implemented as in the original implementation of the switch transformer inputtokens torch tensor 0 6433916 0 18188512 0 02240455 0 563781 0 5526401 0 0958724 0 34253013 0 03644359 0 08744538 0 7909105 0 35205448 0 53364205 0 02900076 0 4168595 0 5802449 0 91486526 0 27414513 0 14991808 0 9383501 0 5209162 0 51207185 0 90618336 0 7309413 0 95533276 model switchtransformerstop1routerself config model classifier weight torch nn parameter torch tensor 0 02008116 0 00620062 0 00811031 0 00031623 0 03542127 0 02703803 0 02335377 0 02971946 t expertindex routerlogits modelinputtokens routerprobs torch softmaxrouterlogits dim1 routerzloss routerzlossfuncrouterlogits auxiliaryloss loadbalancinglossfuncrouterprobs torch argmaxexpertindex dim1 self assertalmostequalauxiliaryloss item 1 000308 places5 self assertalmostequalrouterzloss item 0 4789799 places5 self asserttruetorch allcloseexpertindex bool unsqueeze1 expecteddispatchmask def testmaxroutingcapacityself model switchtransformerstop1routerself config seqlen 128 batchsize 4 hiddenstates torch stackbatchsize torch randseqlen self config hiddensize routerprobs routerlogits model computerouterprobabilitieshiddenstates expertindex torch argmaxrouterprobs dim1 expertindex torch nn functional onehotexpertindex numclassesself config numexperts tokenpriority torch cumsumexpertindex dim2 expertcapacitymask tokenpriority self config expertcapacity expertindex expertindex expertcapacitymask assert torch sumexpertindex batchsize self config numexperts self config expertcapacity slow requiretorch requiretokenizers class switchtransformermodelintegrationtestsunittest testcase requiretorchaccelerator requiretorchbf16 def testsmalllogitsself r logits testing to check implementation consistency between t5x implementation and transformers implementation of switchc transformers we only check the logits of the first batch fmt off fmt on generate test using the smalled switchc model coding utf 8 2022 google switchtransformers s and huggingface inc team licensed under the apache license 
version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license for common tests for common tests switch_transformers forces 100 extra tokens make sure that lm_labels are correctly padded from the right add casaul pad token mask first item items before diagonal pad items after diagonal all items after square there should be num_layers key value embeddings stored in decoder_past there should be a self attn key a self attn value a cross attn key and a cross attn value stored in each decoder_past tuple first forward pass create hypothetical next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice create attention mask first forward pass create hypothetical next token and extent to next_input_ids change a random masked slice from input_ids append to next input_ids and attn_mask get two different outputs select random slice test that outputs are equal for slice first forward pass create hypothetical multiple next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice this test does not pass for small models due to precision errors it is therefore only run for slightly larger models load state dict copies weights but does not tie them check that models has less parameters check that outputs are equal check that outputs after saving and loading are equal check that models has less parameters check that outputs are equal the small switch_transformers model needs higher percentages for cpu mp tests check that gated gelu feed forward and different word embeddings work explicitly pass decoder_head_mask as it is required from switch_transformers model when head_mask specified we check the state of decoder_attentions and cross_attentions just from the last step for common tests for common tests outputs model inputs num_hidden_layers is passed to switchtransformersconfig as num_layers num_hidden_layers is passed to switchtransformersconfig as num_layers switch transformers has different blocks from classic transformer based models the swift mlp contains a router class that has to be tested to check if it is correctly implemented original implementation of the routers here this test checks if the balancy loss is correctly implemented as in the original implementation of the switch transformer this test checks if the router z loss is correctly implemented as in the original implementation of the switch transformer this test tests the equivalency between the switchtransformerstop1router originally implemented from here todo provide link self asserttrue torch allclose expert_index bool unsqueeze 1 expected_dispatch_mask logits testing to check implementation consistency between t5x implementation and transformers implementation of switch c transformers we only check the logits of the first batch fmt off fmt on generate test using the smalled switch c model
import copy import tempfile import unittest from transformers import SwitchTransformersConfig, is_torch_available from transformers.testing_utils import ( require_tokenizers, require_torch, require_torch_accelerator, require_torch_bf16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, SwitchTransformersEncoderModel, SwitchTransformersForConditionalGeneration, SwitchTransformersModel, SwitchTransformersTop1Router, ) from transformers.models.switch_transformers.modeling_switch_transformers import ( SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST, load_balancing_loss_func, router_z_loss_func, ) class SwitchTransformersModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, decoder_layers=None, sparse_step=1, num_sparse_decoder_layers=2, num_sparse_encoder_layers=2, expert_capacity=100, router_jitter_noise=0.0, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers self.sparse_step = sparse_step self.num_sparse_decoder_layers = num_sparse_decoder_layers self.num_sparse_encoder_layers = num_sparse_encoder_layers self.expert_capacity = expert_capacity self.router_jitter_noise = router_jitter_noise def get_large_model_config(self): return SwitchTransformersConfig.from_pretrained("google/switch-base-8") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return SwitchTransformersConfig( vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, 
relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, expert_capacity=self.expert_capacity, router_jitter_noise=self.router_jitter_noise, ) def get_config(self): return SwitchTransformersConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, sparse_step=self.sparse_step, num_sparse_encoder_layers=self.num_sparse_encoder_layers, num_sparse_decoder_layers=self.num_sparse_decoder_layers, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config) model.to(torch_device) model.eval() lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) self.parent.assertEqual(len(decoder_past), config.num_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 10) 
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config).get_decoder().to(torch_device).eval() outputs = model(input_ids, use_cache=True, output_router_logits=False) outputs_use_cache_conf = model(input_ids, output_router_logits=False) outputs_no_past = model(input_ids, use_cache=False, output_router_logits=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids, output_router_logits=False)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, output_router_logits=False)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config).get_decoder() model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 output, past_key_values = model( input_ids, attention_mask=attn_mask, use_cache=True, output_router_logits=False ).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask, output_router_logits=False)[ "last_hidden_state" ] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_router_logits=False )["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config).get_decoder().to(torch_device).eval() outputs = model(input_ids, attention_mask=attention_mask, use_cache=True, output_router_logits=False) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = 
ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_router_logits=False)[ "last_hidden_state" ] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_router_logits=False, )["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) @slow def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): r model = ( SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8").to(torch_device).eval() ) torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = SwitchTransformersModel(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [SwitchTransformersModel, SwitchTransformersForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, 
decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_switch_transformers_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, "output_router_logits": False, } return config, inputs_dict @require_torch class SwitchTransformersModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (SwitchTransformersModel, SwitchTransformersForConditionalGeneration) if is_torch_available() else () ) all_generative_model_classes = (SwitchTransformersForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": SwitchTransformersForConditionalGeneration, "feature-extraction": SwitchTransformersModel, "summarization": SwitchTransformersForConditionalGeneration, "text2text-generation": SwitchTransformersForConditionalGeneration, "translation": SwitchTransformersForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True test_torchscript = False model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = SwitchTransformersModelTester(self) self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_switch_transformers_v1_1(config) @slow def test_model_from_pretrained(self): for model_name in SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SwitchTransformersModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = SwitchTransformersModel(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/switch_transformers_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] max_length = config_and_inputs[1].shape[-1] + 3 model = SwitchTransformersForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, 
config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1], num_beams=1, max_length=max_length, output_attentions=True, return_dict_in_generate=True, **head_masks, ) attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass class SwitchTransformersEncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return SwitchTransformersConfig.from_pretrained("switch_base_8") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = SwitchTransformersConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return config, input_ids, attention_mask def create_and_check_model(self, config, input_ids, attention_mask): model = SwitchTransformersEncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward(self, config, input_ids, attention_mask): model = SwitchTransformersEncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class SwitchTransformersEncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SwitchTransformersEncoderModel,) if 
is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = False test_torchscript = False def setUp(self): self.model_tester = SwitchTransformersEncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch class TestAsymmetricSwitchTransformers(unittest.TestCase): def build_model_and_check_forward_pass(self, **kwargs): tester = SwitchTransformersModelTester(self, **kwargs) config, *inputs = tester.prepare_config_and_inputs() ( input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = inputs model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, output_router_logits=False, ) assert len(outputs) == 4 assert outputs["logits"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size) assert outputs["loss"].size() == () return model def test_small_decoder(self): model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2) assert len(model.encoder.block) == 2 assert len(model.decoder.block) == 1 def test_defaulting_to_symmetry(self): model = self.build_model_and_check_forward_pass(num_hidden_layers=2) assert len(model.decoder.block) == len(model.encoder.block) == 2 @require_torch class SwitchTransformerRouterTest(unittest.TestCase): r config = SwitchTransformersConfig( num_experts=2, hidden_size=8, d_ff=16, router_jitter_noise=0, expert_capacity=4, ) def test_equivalency_balancy_loss(self): r router_probs = torch.Tensor( [ [0.35490513, 0.60419905], [0.4275843, 0.23061597], [0.32985854, 0.43953657], [0.25099766, 0.27730572], [0.7678207, 0.71474564], ] ) expert_indices = torch.Tensor([[0], [1], [1], [0], [0]]).to(torch.int32) loss = load_balancing_loss_func(router_probs, expert_indices) self.assertAlmostEqual(loss.item(), 0.8741045, places=5) def test_equivalency_router_z_loss(self): r logits = torch.Tensor( [ [ [-4.2124424, 3.891939, -3.6481273, 1.8849981], [0.32625437, 2.918651, 0.84758997, -4.556842], [-3.32062, 4.6977115, -0.15439987, 0.44086337], [3.4467149, 4.3436565, -4.7224274, -4.264637], [-2.224406, -2.5318158, -1.3832569, 1.1891162], [-2.320062, -0.44705987, 4.289819, -0.00662684], ], [ [0.99470854, -0.6992364, 0.25503993, 4.2952085], [3.5937333, -3.2408535, -4.298278, 4.426601], [0.7669008, 2.6588762, 2.4505413, 4.6051874], [0.23330331, -3.0845237, 0.6262374, -2.9865491], [0.7595146, -2.1099675, -4.155346, -2.8326452], [2.3771453, 1.004138, -3.1781673, 0.7581556], ], ] ) loss = router_z_loss_func(logits) self.assertAlmostEqual(loss.item(), 13.786719, places=5) def test_equivalency_token_chose_masked_router(self): r input_tokens = torch.Tensor( [ [ [0.6433916, 0.18188512, 0.02240455, 0.563781], [0.5526401, 0.0958724, 0.34253013, 0.03644359], [0.08744538, 0.7909105, 0.35205448, 
0.53364205], ], [ [0.02900076, 0.4168595, 0.5802449, 0.91486526], [0.27414513, 0.14991808, 0.9383501, 0.5209162], [0.51207185, 0.90618336, 0.7309413, 0.95533276], ], ] ) model = SwitchTransformersTop1Router(self.config) model.classifier.weight = torch.nn.Parameter( torch.Tensor( [ [0.02008116, 0.00620062], [-0.00811031, -0.00031623], [-0.03542127, 0.02703803], [0.02335377, -0.02971946], ], ).t() ) expert_index, _, router_logits = model(input_tokens) router_probs = torch.softmax(router_logits, dim=-1) router_z_loss = router_z_loss_func(router_logits) auxiliary_loss = load_balancing_loss_func(router_probs, torch.argmax(expert_index, dim=-1)) self.assertAlmostEqual(auxiliary_loss.item(), 1.000308, places=5) self.assertAlmostEqual(router_z_loss.item(), 0.4789799, places=5) def test_max_routing_capacity(self): model = SwitchTransformersTop1Router(self.config) seq_len = 128 batch_size = 4 hidden_states = torch.stack(batch_size * [torch.rand((seq_len, self.config.hidden_size))]) router_probs, router_logits = model._compute_router_probabilities(hidden_states) expert_index = torch.argmax(router_probs, dim=-1) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.config.num_experts) token_priority = torch.cumsum(expert_index, dim=-2) expert_capacity_mask = token_priority <= self.config.expert_capacity expert_index = expert_index * expert_capacity_mask assert torch.sum(expert_index) <= batch_size * self.config.num_experts * self.config.expert_capacity @slow @require_torch @require_tokenizers class SwitchTransformerModelIntegrationTests(unittest.TestCase): @require_torch_accelerator @require_torch_bf16 def test_small_logits(self): r model = SwitchTransformersModel.from_pretrained("google/switch-base-8", torch_dtype=torch.bfloat16).to( torch_device ) input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device) decoder_input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device) EXPECTED_MEAN_LOGITS = torch.Tensor( [ -0.204102, -0.193359, 0.523438, -0.296875, 0.108887, 0.0211182, 0.605469, -0.100586, -0.0551758, 0.296875, 0.0090332, 0.174805, 0.139648, -0.170898, -0.0981445, 0.0245361, 0.0373535, 0.050293, -0.212891, 0.129883, 0.390625, -0.203125, -0.122559, -0.180664, 0.0437012, -0.349609, -0.0250244, -0.104004, -0.15918, -0.133789 ] ).to(torch.bfloat16) hf_logits = model(input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state.cpu() hf_logits = hf_logits[0, 0, :30] torch.testing.assert_allclose(hf_logits, EXPECTED_MEAN_LOGITS, rtol=6e-3, atol=9e-3) @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_generate(self): model = SwitchTransformersForConditionalGeneration.from_pretrained( "google/switch-base-8", torch_dtype=torch.bfloat16 ).eval() tokenizer = AutoTokenizer.from_pretrained("t5-small", use_fast=False, legacy=False) model = model.to(torch_device) input_ids = tokenizer( "The human walks into a bar and orders a <extra_id_0>", return_tensors="pt" ).input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertEqual(output_str, "drink.") input_ids = tokenizer( "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", return_tensors="pt", ).input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=False)[0] EXPECTED_OUTPUT = "<pad><extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> whiskey<extra_id_4>.</s>" self.assertEqual(output_str, EXPECTED_OUTPUT) @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_batch_generate(self): BATCH_SIZE = 4 model = SwitchTransformersForConditionalGeneration.from_pretrained( "google/switch-base-8", torch_dtype=torch.bfloat16 ).eval() tokenizer = AutoTokenizer.from_pretrained("t5-small", use_fast=False, legacy=False) inputs = [ "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." ] * BATCH_SIZE encoded_input = tokenizer.batch_encode_plus(inputs, return_tensors="pt") sequences = model.generate(**encoded_input) batch_output = tokenizer.batch_decode(sequences, skip_special_tokens=False) for i in range(0, BATCH_SIZE, 2): self.assertEqual(batch_output[i], batch_output[i + 1])
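The router tests above compare load_balancing_loss_func, router_z_loss_func and the SwitchTransformersTop1Router capacity handling against hard-coded reference values. The standalone sketch below re-derives the two auxiliary losses from the Switch Transformers formulation so the expected numbers are easier to follow; it is illustrative only, the sketch_* names are invented here, and it is not the transformers implementation that the tests import.

import torch


def sketch_router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
    # z-loss = mean over tokens of (logsumexp over the expert logits) ** 2,
    # which encourages the router logits to stay small.
    log_z = torch.logsumexp(router_logits, dim=-1)
    return torch.mean(log_z**2)


def sketch_load_balancing_loss(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> torch.Tensor:
    # Auxiliary load-balancing loss:
    #   num_experts * sum_e(fraction_of_tokens_routed_to_expert_e * mean_router_prob_for_expert_e)
    # router_probs:   [..., num_tokens, num_experts] (softmax of the router logits)
    # expert_indices: [..., num_tokens] (index of the expert chosen for each token)
    num_experts = router_probs.shape[-1]
    expert_mask = torch.nn.functional.one_hot(expert_indices.long(), num_experts).float()
    tokens_per_expert = expert_mask.mean(dim=-2)  # fraction of tokens dispatched to each expert
    prob_per_expert = router_probs.mean(dim=-2)  # mean router probability assigned to each expert
    return num_experts * torch.sum(tokens_per_expert * prob_per_expert, dim=-1).mean()


if __name__ == "__main__":
    # Tiny smoke test on a random [batch, seq_len, num_experts] router output.
    torch.manual_seed(0)
    router_logits = torch.randn(2, 6, 4)
    router_probs = torch.softmax(router_logits, dim=-1)
    print(sketch_router_z_loss(router_logits).item())
    print(sketch_load_balancing_loss(router_probs, router_probs.argmax(dim=-1)).item())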
# coding=utf-8
# Copyright 2021 Google T5 and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The slow tests often fail with OOM errors on GPU. Setting
# XLA_PYTHON_CLIENT_ALLOCATOR="platform" makes JAX allocate exactly what is needed
# on demand and deallocate memory that is no longer needed (but it will be slower),
# as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
import tempfile import unittest import numpy as np import transformers from transformers import is_flax_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_sentencepiece, require_tokenizers, slow, ) from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp import optax from flax.core.frozen_dict import unfreeze from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import FLAX_MODEL_MAPPING, ByT5Tokenizer, T5Config, T5Tokenizer from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.t5.modeling_flax_t5 import ( FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model, shift_tokens_right, ) class FlaxT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): model = FlaxT5Model(config=config) result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, 
attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)) def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(input_ids) decoder_attention_mask = jnp.ones_like(decoder_attention_mask) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_flax class FlaxT5ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5Model, FlaxT5ForConditionalGeneration) if is_flax_available() else () all_generative_model_classes = (FlaxT5ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True def setUp(self): self.model_tester = FlaxT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_use_cache_forward_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, *config_and_inputs) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def 
encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_shift_right(self): decoder_start_token_id = 0 pad_token_id = 1 labels = np.arange(2, 102).reshape(5, 20) labels[:2, 15:] = -100 decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id) np_decoder_input_ids = np.array(decoder_input_ids) padded_slice = np_decoder_input_ids[:2, (15 + 1) :] self.assertTrue((padded_slice == 1).all()) not_padded_slice = np_decoder_input_ids[2:, 1:] rolled_labels = np.roll(labels[2:], 1)[:, 1:] self.assertTrue((not_padded_slice == rolled_labels).all()) self.assertTrue((np_decoder_input_ids[:, 0] == 0).all()) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} 
not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, base_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") class FlaxT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.seq_length = self.encoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers 
self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=False, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = FlaxT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class FlaxT5EncoderOnlyModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5EncoderModel,) if is_flax_available() else () is_encoder_decoder = False def setUp(self): self.model_tester = FlaxT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_save_load_from_base(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, base_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = 
flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @require_sentencepiece @require_tokenizers @require_flax class FlaxT5ModelIntegrationTests(unittest.TestCase): @slow def test_small_integration_test(self): model = FlaxT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_generation(self): model = FlaxT5ForConditionalGeneration.from_pretrained("t5-small") model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_generation_bfloat16(self): model = FlaxT5ForConditionalGeneration.from_pretrained("t5-small", dtype=jnp.bfloat16) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = 
T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_summarization(self): model = FlaxT5ForConditionalGeneration.from_pretrained("t5-base") tok = T5Tokenizer.from_pretrained("t5-base") FRANCE_ARTICLE = ( "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. 
Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. 
"Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. 
It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. 
What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. 
Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says . all 150 on board were killed in the crash .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . he says the new framework would reduce Iran's low-enriched uranium stockpile and cut" " centrifuges . miller: if it had been, there would have been no Iranian team at the table .", "prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] dct = tok( ["summarize: " + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, return_tensors="np", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, do_sample=False, early_stopping=True, ).sequences decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( expected_summaries, decoded, )
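# Illustrative sketch (not part of the test suite): the slow Flax integration checks above can
# be reproduced by hand in a few lines. The checkpoint and prompt mirror test_small_generation;
# the helper name below is hypothetical.
def _example_flax_t5_greedy_generation():
    from transformers import FlaxT5ForConditionalGeneration, T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    model = FlaxT5ForConditionalGeneration.from_pretrained("t5-small")
    input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids
    # Greedy decoding, as configured in test_small_generation; expected to decode to "Hello there!".
    sequences = model.generate(input_ids, max_length=8, num_beams=1, do_sample=False).sequences
    return tokenizer.batch_decode(sequences, skip_special_tokens=True)[0]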
# coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch T5 model. """

# Beyond the shape and equivalence checks below, the full suite also covers: building decoder
# inputs by shifting labels to the right, cached (past_key_values) decoding matching
# full-sequence decoding, encoder/decoder weight sharing, fp16/bf16 loading with and without
# accelerate (the `keep_in_fp32_modules` behaviour), dynamic torch quantization
# (`quantize_dynamic`), and slow integration tests scored against the original T5 (MTF)
# implementation.
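# Usage note (an assumption, mirroring the Flax tests above): each *ModelTester below is a
# lightweight helper owned by a unittest.TestCase and is typically driven as, roughly,
#
#     tester = T5ModelTester(parent=self)            # `self` is the TestCase, used for assertions
#     config_and_inputs = tester.prepare_config_and_inputs()
#     tester.create_and_check_model(*config_and_inputs)
#
# i.e. the tester builds a small random config plus inputs and the check methods assert shapes
# and cache/weight-tying behaviour against it.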
import copy import os import pickle import tempfile import unittest from transformers import T5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_accelerate, require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils import cached_property, is_torch_fx_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace if is_torch_available(): import torch from transformers import ( AutoTokenizer, ByT5Tokenizer, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, T5ForSequenceClassification, T5Model, T5Tokenizer, ) from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST class T5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return T5Config.from_pretrained("t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) input_ids[:, -1] = self.eos_token_id decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return T5Config( vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, 
bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) self.parent.assertEqual(len(decoder_past), config.num_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = T5ForSequenceClassification(config=config).to(torch_device).eval() outputs = 
model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder() model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] 
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [T5Model, T5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_t5_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = T5ForConditionalGeneration(config=config).to(torch_device).eval() 
model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (T5Model, T5ForConditionalGeneration, T5ForSequenceClassification, T5ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (T5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": T5ForConditionalGeneration, "feature-extraction": T5Model, "question-answering": T5ForQuestionAnswering, "summarization": T5ForConditionalGeneration, "text-classification": T5ForSequenceClassification, "text2text-generation": T5ForConditionalGeneration, "translation": T5ForConditionalGeneration, "zero-shot": T5ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = True test_model_parallel = True is_encoder_decoder = True model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = T5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "T5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: 
input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) self.clear_torch_jit_class_registry() def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (T5Model, T5ForConditionalGeneration, T5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = 
"gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) @slow def test_model_from_pretrained(self): for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = T5Model.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = T5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] max_length = config_and_inputs[1].shape[-1] + 3 model = T5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, 
config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1], num_beams=1, max_length=max_length, output_attentions=True, return_dict_in_generate=True, **head_masks, ) attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass class T5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return T5Config.from_pretrained("t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] 
        self.parent.assertFalse(torch.isnan(output).any().item())

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


class T5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (T5EncoderModel,) if is_torch_available() else ()
    test_pruning = False
    test_resize_embeddings = False
    test_model_parallel = True
    all_parallelizable_model_classes = (T5EncoderModel,) if is_torch_available() else ()

    def setUp(self):
        self.model_tester = T5EncoderOnlyModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)


def use_task_specific_params(model, task):
    # Update the model config with the task-specific parameters (e.g. the "summarize: " prefix).
    model.config.update(model.config.task_specific_params[task])


@require_torch
@require_accelerate
@require_tokenizers
@slow
class T5ModelFp16Tests(unittest.TestCase):
    def test_fp16_fp32_conversion(self):
        # The feed-forward output projection `wo` is expected to stay in float32 when the model is
        # loaded in float16, while other weights (e.g. `wi`) are cast to the requested dtype.
        orig_import = __import__
        accelerate_mock = unittest.mock.Mock()

        # Mock the import of `accelerate` so we can toggle its availability.
        def import_accelerate_mock(name, *args, **kwargs):
            if name == "accelerate":
                if accelerate_available:
                    return accelerate_mock
                else:
                    raise ImportError
            return orig_import(name, *args, **kwargs)

        # Load without `accelerate` available.
        with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock):
            accelerate_available = False

            model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16)
            self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
            self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)

            # Load in bf16 (no fp32 upcast expected for bf16).
            model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.bfloat16)
            self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
            self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)

        # Load using `accelerate` in bf16.
        model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.bfloat16, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)

        # Load using `accelerate` in bf16 with `low_cpu_mem_usage`.
        model = T5ForConditionalGeneration.from_pretrained(
            "t5-small", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
        )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)

        # Load in fp16 with `low_cpu_mem_usage`.
        model = T5ForConditionalGeneration.from_pretrained(
            "t5-small", torch_dtype=torch.float16, low_cpu_mem_usage=True
        )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)

        # Load using `accelerate` in fp16.
        model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) @require_torch @require_sentencepiece @require_tokenizers class T5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return T5ForConditionalGeneration.from_pretrained("t5-base").to(torch_device) @cached_property def tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @slow def test_torch_quant(self): r model_name = "google/flan-t5-small" tokenizer = T5Tokenizer.from_pretrained(model_name) model = T5ForConditionalGeneration.from_pretrained(model_name) model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8) input_text = "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids _ = model.generate(input_ids) @slow def test_small_generation(self): model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="pt").input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_integration_test(self): model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device) tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = self.tokenizer FRANCE_ARTICLE = ( "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." 
Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. 
Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." 
) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. 
The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. 
Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. 
Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime .", "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] use_task_specific_params(model, "summarization") dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( expected_summaries, decoded, ) @slow def test_translation_en_to_de(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_de") en_text = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.' 
) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate(input_ids) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_fr") en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. " ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) new_truncated_translation = ( "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." ) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_ro") en_text = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022." inputs = tok(model.config.prefix + en_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_contrastive_search_t5(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. 
Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) article = "summarize: " + article.strip() t5_tokenizer = AutoTokenizer.from_pretrained("flax-community/t5-base-cnn-dm") t5_model = T5ForConditionalGeneration.from_pretrained("flax-community/t5-base-cnn-dm").to(torch_device) input_ids = t5_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt" ).input_ids.to(torch_device) outputs = t5_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64) generated_text = t5_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos has been married 10 times, nine of them in the Bronx. Her husbands filed for " "permanent residence after the marriages, prosecutors say." ], ) @require_torch class TestAsymmetricT5(unittest.TestCase): def build_model_and_check_forward_pass(self, **kwargs): tester = T5ModelTester(self, **kwargs) config, *inputs = tester.prepare_config_and_inputs() ( input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = inputs model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) assert len(outputs) == 4 assert outputs["logits"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size) assert outputs["loss"].size() == () return model def test_small_decoder(self): model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2) assert len(model.encoder.block) == 2 assert len(model.decoder.block) == 1 def test_defaulting_to_symmetry(self): model = self.build_model_and_check_forward_pass(num_hidden_layers=2) assert len(model.decoder.block) == len(model.encoder.block) == 2
from __future__ import annotations import unittest from transformers import T5Config, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model class TFT5ModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_labels = True self.vocab_size = 99 self.n_positions = 14 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.d_ff = 37 self.relative_attention_num_buckets = 8 self.dropout_rate = 0.1 self.initializer_factor = 0.002 self.eos_token_id = 1 self.pad_token_id = 0 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = T5Config( vocab_size=self.vocab_size, n_positions=self.n_positions, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, ) return (config, input_ids, input_mask, token_labels) def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels): model = TFT5Model(config=config) inputs = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } result = model(inputs) result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertEqual(len(decoder_past), config.num_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels): model = TFT5ForConditionalGeneration(config=config) inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } result = model(inputs_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask): model = TFT5Model(config=config).get_decoder() input_ids = input_ids[:1, :] self.batch_size = 1 outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) 
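        # The first forward pass above runs with use_cache=True; what follows creates a hypothetical
        # next token, appends it to next_input_ids, and checks that decoding that single token with
        # past_key_values matches a full forward pass on a randomly chosen output slice.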
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids)[0] output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0] random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_t5_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask ): model = TFT5Model(config=config).get_decoder() half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0] output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_t5_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask ): model = TFT5Model(config=config).get_decoder() input_ids = input_ids[:1, :] attention_mask = attention_mask[:1, :] self.batch_size = 1 outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, token_labels) = config_and_inputs inputs_dict = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } return config, inputs_dict @require_tf class TFT5ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): is_encoder_decoder = True all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFT5ForConditionalGeneration, "feature-extraction": TFT5Model, "summarization": TFT5ForConditionalGeneration, "text2text-generation": TFT5ForConditionalGeneration, "translation": TFT5ForConditionalGeneration, } if is_tf_available() else {} ) test_onnx = False def setUp(self): self.model_tester = TFT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_t5_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_model(*config_and_inputs) def test_t5_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:]) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs) def test_t5_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs) def test_t5_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs) def test_t5_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs config_and_inputs = (config, input_ids, None, input_mask) self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFT5Model.from_pretrained("t5-small") self.assertIsNotNone(model) def test_generate_with_headmasking(self): pass @unittest.skip(reason="The inputs of the Main Layer are different.") def test_keras_save_load(self): pass @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass class TFT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets 
= relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = TFT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase): is_encoder_decoder = False all_model_classes = (TFT5EncoderModel,) if is_tf_available() else () test_onnx = False def setUp(self): self.model_tester = TFT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_train_pipeline_custom_model(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFT5GenerationIntegrationTests(unittest.TestCase): @slow def test_greedy_xla_generate_simple(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = [ "Translate English to German: Today is a beautiful day.", "Translate English to German: I have four cats, three dogs, two birds, and a horse.", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids xla_generate = tf.function(model.generate, jit_compile=True) output_ids = model.generate(input_ids) output_ids_xla = xla_generate(input_ids) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) expected_output_string = [ "Heute ist ein schöner Tag.", "Ich habe vier Katzen, drei Hunde, zwei Vögel und ein Pferd.", ] self.assertListEqual(expected_output_string, output_strings) self.assertListEqual(expected_output_string, output_strings_xla) @slow def test_greedy_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["Yesterday, my name was", 
"Today is a beautiful day and"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"] self.assertListEqual(expected_output_string, output_strings) @slow def test_sample_xla_generate_simple(self): with tf.device(":/CPU:0"): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentence = "Translate English to German: I have two bananas" input_ids = tokenizer(sentence, return_tensors="tf", padding=True).input_ids expected_output_string = ["Ich habe zwei Bananen"] expected_output_string_xla = ["Ich habe 2 Bananen"] output_ids = model.generate(input_ids, do_sample=True, seed=[42, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(expected_output_string, output_strings) xla_generate = tf.function(model.generate, jit_compile=True) output_ids_xla = xla_generate(input_ids, do_sample=True, seed=[42, 0]) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) self.assertListEqual(expected_output_string_xla, output_strings_xla) @slow def test_sample_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "do_sample": True, "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "repetition_penalty": 2.2, "temperature": 0.8, "top_k": 500, "top_p": 0.9, "seed": [20, 0], } with tf.device(":/CPU:0"): output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["- I really love my way of this.", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) @unittest.skip("Skip for now as TF 2.13 breaks it on GPU") @slow def test_beam_search_xla_generate_simple(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_fr", {}) model.config.update(translation_config) sentences = [ model.config.prefix + "Today is a beautiful day.", model.config.prefix + "I have four cats, three dogs, two birds, and a horse.", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids xla_generate = tf.function(model.generate, jit_compile=True) output_ids = model.generate(input_ids, num_beams=2) output_ids_xla = xla_generate(input_ids, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) expected_output_string = [ "Aujourd'hui est une belle journée.", "J'ai quatre chats, trois chiens, deux oiseaux et un cheval.", ] 
self.assertListEqual(expected_output_string, output_strings) self.assertListEqual(expected_output_string, output_strings_xla) @slow def test_beam_search_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, "num_beams": 4, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Ich liebe es so sehr!", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return TFT5ForConditionalGeneration.from_pretrained("t5-base") @slow def test_small_integration_test(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -4.771147 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -14.757326 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -7.592465 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") FRANCE_ARTICLE = ( "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. 
Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. 
He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. 
The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." 
) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. 
But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. 
It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime .", "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] task_specific_config = getattr(model.config, "task_specific_params", {}) summarization_config = task_specific_config.get("summarization", {}) model.config.update(summarization_config) dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], max_length=512, padding="max_length", truncation=True, return_tensors="tf", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = [ tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch ] self.assertListEqual( expected_summaries, decoded, ) @slow def test_translation_en_to_de(self): tok = T5Tokenizer.from_pretrained("t5-base") model = self.model task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_de", {}) self.model.config.update(translation_config) original_input = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.' 
) input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_fr", {}) model.config.update(translation_config) en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. " ) new_truncated_translation = ( "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." ) input_ids = tok(model.config.prefix + en_text, return_tensors="tf").input_ids output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = T5Tokenizer.from_pretrained("t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_ro", {}) model.config.update(translation_config) original_input = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022." input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation)
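# The summarization/translation tests above drive generation through T5's `task_specific_params`:
# each task stores a text prefix plus default generation settings that get merged into the model
# config. A minimal, hedged sketch of that flow outside the test harness (t5-small is assumed to
# ship these params like t5-base does; the decoded text depends on the checkpoint, so nothing is
# asserted here):
from transformers import T5Tokenizer, TFT5ForConditionalGeneration

demo_tokenizer = T5Tokenizer.from_pretrained("t5-small")
demo_model = TFT5ForConditionalGeneration.from_pretrained("t5-small")

task_params = getattr(demo_model.config, "task_specific_params", None) or {}
demo_model.config.update(task_params.get("translation_en_to_de", {}))
prefix = getattr(demo_model.config, "prefix", None) or "translate English to German: "

demo_input_ids = demo_tokenizer(prefix + "The house is wonderful.", return_tensors="tf").input_ids
demo_output_ids = demo_model.generate(demo_input_ids, num_beams=4, max_length=50)
print(demo_tokenizer.decode(demo_output_ids[0], skip_special_tokens=True))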
codingutf8 2018 google t5 s and huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license we have a sentencepiece fixture for testing test converttokentoid and convertidtotoken token s tokenid 1 self assertequalself gettokenizer converttokentoidtoken tokenid self assertequalself gettokenizer convertidtotokentokenid token def testgetvocabself vocabkeys listself gettokenizer getvocab keys self assertequalvocabkeys0 unk self assertequalvocabkeys1 s self assertequalvocabkeys1100 pad self assertequallenvocabkeys 1101 def testvocabsizeself self assertequalself gettokenizer vocabsize 1000 self assertequallenself gettokenizer 1101 def testfulltokenizerself tokenizer t5tokenizersamplevocab tokens tokenizer tokenizethis is a test self assertlistequaltokens this is a t est self assertlistequaltokenizer converttokenstoidstokens 285 46 10 170 382 tokens tokenizer tokenizei was born in 92000 and this is fals self assertlistequal tokens spieceunderline i spieceunderline was spieceunderline b or n spieceunderline in spieceunderline 9 2 0 0 0 spieceunderline and spieceunderline this spieceunderline is spieceunderline f al s ids tokenizer converttokenstoidstokens self assertlistequalids 8 21 84 55 24 19 7 0 602 347 347 347 3 12 66 46 72 80 6 0 4 backtokens tokenizer convertidstotokensids self assertlistequal backtokens spieceunderline i spieceunderline was spieceunderline b or n spieceunderline in spieceunderline unk 2 0 0 0 spieceunderline and spieceunderline this spieceunderline is spieceunderline f al s unk cachedproperty def t5basetokenizerself return t5tokenizer frompretrainedt5base cachedproperty def t5basetokenizerfastself return t5tokenizerfast frompretrainedt5base def gettokenizerself kwargs t5tokenizer return self tokenizerclass frompretrainedself tmpdirname kwargs def getrusttokenizerself kwargs t5tokenizerfast return self rusttokenizerclass frompretrainedself tmpdirname kwargs def testrustandpythonfulltokenizersself if not self testrusttokenizer return tokenizer self gettokenizer rusttokenizer self getrusttokenizer sequence i was born in 92000 and this is fals tokens tokenizer tokenizesequence rusttokens rusttokenizer tokenizesequence self assertlistequaltokens rusttokens ids tokenizer encodesequence addspecialtokensfalse rustids rusttokenizer encodesequence addspecialtokensfalse self assertlistequalids rustids rusttokenizer self getrusttokenizer ids tokenizer encodesequence rustids rusttokenizer encodesequence self assertlistequalids rustids def testeostreatmentself tokenizer self t5basetokenizer batchwitheosadded tokenizerhis i went to the gyms s batchwithouteosadded tokenizerhi i went to the gym self assertlistequalbatchwitheosaddedinputids batchwithouteosaddedinputids def testpreparebatchself tokenizer self t5basetokenizer srctext a long paragraph for summarization another paragraph for summarization expectedsrctokens 71 307 8986 21 4505 1635 1707 5 tokenizer eostokenid batch tokenizersrctext paddingtrue returntensorsframework self assertisinstancebatch batchencoding if framework jax result listbatch inputids numpy0 else result listbatch 
inputids tolist0 self assertlistequalexpectedsrctokens result self assertequal2 9 batch inputids shape self assertequal2 9 batch attentionmask shape def testemptytargettextself tokenizer self t5basetokenizer srctext a long paragraph for summarization another paragraph for summarization batch tokenizersrctext paddingtrue returntensorsframework check if inputids are returned and no decoderinputids self assertininputids batch self assertinattentionmask batch self assertnotindecoderinputids batch self assertnotindecoderattentionmask batch def testmaxlengthself tokenizer self t5basetokenizer tgttext summary of the text another summary targets tokenizer texttargettgttext maxlength32 paddingmaxlength truncationtrue returntensorsframework self assertequal32 targetsinputids shape1 def testoutputsnotlongerthanmaxlenself tokenizer self t5basetokenizer batch tokenizer i am a small frog 1000 i am a small frog paddingtrue truncationtrue returntensorsframework self assertisinstancebatch batchencoding since t5 does not have a max input length this test should be changed to the following in transformers v5 self assertequalbatch inputids shape 2 8001 self assertequalbatch inputids shape 2 512 def testeosininputself tokenizer self t5basetokenizer srctext a long paragraph for summarization s tgttext summary of the text s expectedsrctokens 71 307 8986 21 4505 1635 1707 5 1 expectedtgttokens 20698 13 8 1499 5 1 batch tokenizersrctext texttargettgttext self assertequalexpectedsrctokens batchinputids0 self assertequalexpectedtgttokens batchlabels0 def testtokentypeidsself srctext1 a first paragraph for summarization srctext2 a second paragraph for summarization fasttokentypeids self t5basetokenizerfast srctext1 srctext2 addspecialtokenstrue returntokentypeidstrue tokentypeids slowtokentypeids self t5basetokenizer srctext1 srctext2 addspecialtokenstrue returntokentypeidstrue tokentypeids self assertequalslowtokentypeids fasttokentypeids self assertequallenslowtokentypeids0 18 def testfastandslowsameresultself srctext pad today is unk nice day s tgtids 0 1960 19 2 1245 239 1 tgttext pad today isunk nice days fastids self t5basetokenizerfastsrctext addspecialtokensfalse inputids slowids self t5basetokenizersrctext addspecialtokensfalse inputids self assertequaltgtids fastids self assertequaltgtids slowids fasttext self t5basetokenizerfast decodefastids slowtext self t5basetokenizer decodefastids self assertequaltgttext fasttext self assertequaltgttext slowtext def testspecialtokensinitializationself for tokenizer pretrainedname kwargs in self tokenizerslist with self subtestftokenizer class name pretrainedname addedtokens fextraidi for i in range100 addedtokenspecial lstriptrue tokenizerr self rusttokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs tokenizercr self rusttokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs fromslowtrue tokenizerp self tokenizerclass frompretrained pretrainedname additionalspecialtokensaddedtokens kwargs poutput tokenizerp encodehey this is a special token routput tokenizerr encodehey this is a special token croutput tokenizercr encodehey this is a special token specialtokenid tokenizerr encodespecial addspecialtokensfalse0 self assertequalpoutput routput self assertequalcroutput routput self asserttruespecialtokenid in poutput self asserttruespecialtokenid in routput self asserttruespecialtokenid in croutput def testspecialtokensinitializationwithnonemptyadditionalspecialtokensself tokenizerlist if self 
# coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json import os import re import tempfile import unittest from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, T5Tokenizer, T5TokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_seqio, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = T5Tokenizer rust_tokenizer_class = T5TokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[1100], "<pad>") self.assertEqual(len(vocab_keys), 1_101) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1000) self.assertEqual(len(self.get_tokenizer()), 1101) def test_full_tokenizer(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def t5_base_tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @cached_property def t5_base_tokenizer_fast(self): return T5TokenizerFast.from_pretrained("t5-base") def get_tokenizer(self, **kwargs) -> T5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_outputs_not_longer_than_maxlen(self): tokenizer = self.t5_base_tokenizer batch = tokenizer( ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 512)) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1] expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) def test_token_type_ids(self): src_text_1 = ["A first paragraph for summarization."] src_text_2 = ["A second paragraph for summarization."] fast_token_type_ids = self.t5_base_tokenizer_fast( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids slow_token_type_ids = self.t5_base_tokenizer( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids self.assertEqual(slow_token_type_ids, fast_token_type_ids) self.assertEqual(len(slow_token_type_ids[0]), 18) def test_fast_and_slow_same_result(self): src_text = "<pad> Today is <unk> nice day </s>" tgt_ids = [0, 1960, 19, 2, 1245, 239, 1] tgt_text = "<pad> Today is<unk> nice day</s>" fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids self.assertEqual(tgt_ids, fast_ids) self.assertEqual(tgt_ids, slow_ids) fast_text = self.t5_base_tokenizer_fast.decode(fast_ids) slow_text = self.t5_base_tokenizer.decode(fast_ids) self.assertEqual(tgt_text, fast_text) self.assertEqual(tgt_text, slow_text) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ 
"an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_pretrained_model_lists(self): self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[31220, 7, 41, 14034, 801, 38, 3, 102, 63, 17, 127, 524, 18, 7031, 2032, 277, 11, 3, 102, 63, 17, 127, 524, 18, 2026, 17, 10761, 18, 7041, 61, 795, 879, 18, 19681, 4648, 7, 41, 12920, 382, 6, 350, 6383, 4949, 6, 2158, 12920, 382, 9, 6, 3, 4, 11160, 6, 2043, 17153, 279, 49, 17, 6, 3, 4, 434, 9688, 11439, 21, 6869, 10509, 17725, 41, 567, 9138, 61, 11, 6869, 10509, 11946, 41, 18207, 517, 61, 28, 147, 3538, 1220, 7140, 10761, 2250, 16, 910, 1220, 8024, 11, 1659, 1413, 32, 883, 2020, 344, 2215, 226, 6, 12901, 382, 127, 524, 11, 4738, 7, 127, 15390, 5, 1], [272, 24203, 19, 876, 12, 554, 18, 9719, 1659, 2647, 26352, 6497, 7, 45, 73, 9339, 400, 26, 1499, 57, 22801, 10760, 30, 321, 646, 11, 269, 2625, 16, 66, 7500, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [37, 1704, 4216, 3, 20400, 4418, 7, 147, 8, 19743, 1782, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="t5-base", revision="5a7ff2d8f5117c194c7e32ec1ccbf04642cca99b", ) def test_get_sentinel_tokens(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB, extra_ids=10) sentinel_tokens = tokenizer.get_sentinel_tokens() self.assertEqual(len(sentinel_tokens), 10) self.assertListEqual(sorted(sentinel_tokens), sorted([f"<extra_id_{str(i)}>" for i in range(0, 10)])) self.assertTrue([re.search(r"<extra_id_\d+>", token) is not None for token in sentinel_tokens]) def test_get_sentinel_token_ids(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB, extra_ids=10) self.assertListEqual(sorted(tokenizer.get_sentinel_token_ids()), sorted(range(1000, 1010))) def test_get_sentinel_tokens_for_fasttokenizer(self): tokenizer = T5TokenizerFast(SAMPLE_VOCAB, extra_ids=10) sentinel_tokens = tokenizer.get_sentinel_tokens() self.assertEqual(len(sentinel_tokens), 10) self.assertListEqual(sorted(sentinel_tokens), sorted([f"<extra_id_{str(i)}>" for i in range(0, 10)])) self.assertTrue([re.search(r"<extra_id_\d+>", token) is not None for token in sentinel_tokens]) def test_get_sentinel_token_ids_for_fasttokenizer(self): tokenizer = T5TokenizerFast(SAMPLE_VOCAB, extra_ids=10) self.assertListEqual(sorted(tokenizer.get_sentinel_token_ids()), sorted(range(1000, 1010))) def test_some_edge_cases(self): tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False) sp_tokens = tokenizer.sp_model.encode("</s>>", out_type=str) self.assertEqual(sp_tokens, ["<", "/", "s", ">", ">"]) tokens = tokenizer.tokenize("</s>>") self.assertNotEqual(sp_tokens, tokens) self.assertEqual(tokens, ["</s>", ">"]) tokens = tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str)) tokens = tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str)) tokens = tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) tokens = tokenizer.tokenize(" ▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): @classmethod def setUpClass(cls): tokenizer = T5Tokenizer(SAMPLE_VOCAB, extra_ids=0, legacy=False) tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<extra_id_0>", rstrip=False, lstrip=False)]} ) cls.tokenizer = tokenizer def test_add_dummy_prefix(self): input_ids = self.tokenizer.encode(". Hello", add_special_tokens=False) self.assertEqual(input_ids, [7, 4, 156, 86, 20]) sp_encode = self.tokenizer.sp_model.encode(". Hello") self.assertEqual(input_ids, [7] + sp_encode) tokens = self.tokenizer.tokenize(". 
Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): input_ids = self.tokenizer.encode(" . Hello", add_special_tokens=False) self.assertEqual(input_ids, [7, 4, 156, 86, 20]) sp_encode = self.tokenizer.sp_model.encode(" . Hello") self.assertEqual(input_ids, [7] + sp_encode) tokens = self.tokenizer.tokenize(" . Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [156, 46, 44, 2]) tokens = self.tokenizer.tokenize("▁He is not") self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) input_ids = self.tokenizer.encode("▁He is not<extra_id_0> ▁He") self.assertEqual(input_ids, [156, 46, 44, 1001, 156, 2]) tokens = self.tokenizer.tokenize("▁He is not<extra_id_0> ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<extra_id_0>", "▁He"]) input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [156, 46, 44, 156, 2]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) def test_character_after_special_token(self): input_ids = self.tokenizer.encode("Hey <extra_id_0>I") self.assertEqual(input_ids, [156, 30, 1001, 100, 2]) tokens = self.tokenizer.tokenize("Hey <extra_id_0>I") self.assertEqual(tokens, ["▁He", "y", "<extra_id_0>", "I"]) input_ids = self.tokenizer.encode("Hello, <extra_id_0>,") self.assertEqual(input_ids, [156, 86, 20, 3, 1001, 3, 2]) tokens = self.tokenizer.tokenize("Hello, <extra_id_0>,") self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<extra_id_0>", ","]) def test_special_tokens_strip(self): input_ids = self.tokenizer.encode(" <extra_id_0> ,") self.assertEqual(input_ids, [1001, 7, 3, 2]) tokens = self.tokenizer.tokenize(" <extra_id_0> ,") self.assertEqual(tokens, ["<extra_id_0>", "▁", ","]) input_ids = self.tokenizer.encode("No <extra_id_0> He") self.assertEqual(input_ids, [284, 1001, 156, 2]) tokens = self.tokenizer.tokenize("No <extra_id_0> He") self.assertEqual(tokens, ["▁No", "<extra_id_0>", "▁He"]) tokenizer = T5Tokenizer(SAMPLE_VOCAB, extra_ids=0) tokenizer.add_special_tokens({"bos_token": AddedToken("<bos>")}) input_ids = tokenizer.encode("No <bos> He") self.assertEqual(input_ids, [284, 1001, 156, 2]) tokens = tokenizer.tokenize("No <bos> He") self.assertEqual(tokenizer.sp_model.encode("No ", out_type=str), ["▁No"]) self.assertEqual(tokens, ["▁No", "<bos>", "▁He"]) @require_seqio @unittest.skipIf( os.getenv("RUN_TOKENIZER_INTEGRATION", "0") == "0", "RUN_TOKENIZER_INTEGRATION=1 to run tokenizer integration tests", ) def test_integration_seqio(self): from datasets import load_dataset from seqio import SentencePieceVocabulary ds = load_dataset("xnli", "all_languages", split="train+test+validation") input_texts = [ "Bonjour <extra_id_0>.", " Hey <extra_id_0>I love you", ] import tqdm vocab_path = "gs://t5-data/vocabs/umt5.256000/sentencepiece.model" t5x_tokenizer = SentencePieceVocabulary(vocab_path, extra_ids=300) hf_tokenizer = T5Tokenizer.from_pretrained("google/umt5-small", legacy=False) for text in input_texts: self.assertEqual( 
hf_tokenizer.encode(text, add_special_tokens=False), t5x_tokenizer.tokenizer.tokenize(text), f"{text}" ) for texts in tqdm.tqdm(ds["premise"]): for text in texts: self.assertEqual( hf_tokenizer.encode(text, add_special_tokens=False), t5x_tokenizer.tokenizer.tokenize(text), f"{text}", ) hf_tokenizer = T5Tokenizer.from_pretrained("t5-small") vocab_path = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" t5x_tokenizer = SentencePieceVocabulary(vocab_path, extra_ids=300) for text in input_texts: self.assertEqual( hf_tokenizer.encode(text, add_special_tokens=False), t5x_tokenizer.tokenizer.tokenize(text), f"{text}" ) for texts in tqdm.tqdm(ds["premise"]): for text in texts: self.assertEqual( hf_tokenizer.encode(text, add_special_tokens=False), t5x_tokenizer.tokenizer.tokenize(text), f"{text}", )
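# The tests above exercise T5's sentinel (extra-id) tokens, the get_sentinel_tokens() /
# get_sentinel_token_ids() helpers, and the text_target= tokenization API. Below is a minimal,
# hedged sketch of how those pieces are typically combined into a span-corruption style
# input/target pair; the "t5-small" checkpoint and the example sentence are illustrative
# assumptions, not values taken from the tests.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
sentinels = tokenizer.get_sentinel_tokens()
assert "<extra_id_0>" in sentinels  # the 100 default sentinel tokens are part of the vocab

# Mask two spans in the input and place them, delimited by the same sentinels, in the target.
corrupted = "The <extra_id_0> walks in <extra_id_1> park"
target = "<extra_id_0> cute dog <extra_id_1> the <extra_id_2>"

inputs = tokenizer(corrupted, return_tensors="pt")
labels = tokenizer(text_target=target, return_tensors="pt").input_ids  # usable as labels for a T5 seq2seq model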
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Table Transformer model. """
import inspect import math import unittest from huggingface_hub import hf_hub_download from transformers import ResNetConfig, TableTransformerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TableTransformerForObjectDetection, TableTransformerModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TableTransformerModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=3, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return TableTransformerConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def 
create_and_check_table_transformer_model(self, config, pixel_values, pixel_mask, labels): model = TableTransformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_table_transformer_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = TableTransformerForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) def create_and_check_table_transformer_no_timm_backbone(self, config, pixel_values, pixel_mask, labels): config.use_timm_backbone = False config.backbone_config = ResNetConfig() model = TableTransformerForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TableTransformerModel, TableTransformerForObjectDetection, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["TableTransformerForObjectDetection"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = TableTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=TableTransformerConfig, 
has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_table_transformer_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_model(*config_and_inputs) def test_table_transformer_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_object_detection_head_model(*config_and_inputs) def test_table_transformer_no_timm_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_no_timm_backbone(*config_and_inputs) @unittest.skip(reason="Table Transformer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Table Transformer does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Table Transformer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Table Transformer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if model_class.__name__ == "TableTransformerForObjectDetection": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "TableTransformerForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["pixel_values"] = floats_tensor( [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] ) config.num_channels = 1 config.backbone_config.num_channels = 1 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class TableTransformerModelIntegrationTests(unittest.TestCase): def test_table_detection(self): image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection") model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection") model.to(torch_device) file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png") image = Image.open(file_path).convert("RGB") inputs = image_processor(image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = (1, 15, 3) self.assertEqual(outputs.logits.shape, expected_shape) expected_logits = torch.tensor( [[-6.7329, -16.9590, 6.7447], [-8.0038, -22.3071, 6.9288], [-7.2445, -20.9855, 7.3465]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_boxes = torch.tensor( [[0.4868, 0.1764, 0.6729], [0.6674, 0.4621, 0.3864], [0.4720, 0.1757, 0.6362]], device=torch_device ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-3))
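# Illustrative sketch (not part of the original test suite): how the raw detection outputs checked above could be
# turned into labelled boxes. It assumes the same checkpoint and image processor as `test_table_detection`; the
# helper name and the score threshold are arbitrary choices made for this example.
def _example_post_process_table_detection(model, image_processor, image, outputs):
    # `post_process_object_detection` rescales the normalized predictions to the original image size
    # and keeps only detections whose score exceeds the threshold.
    target_sizes = torch.tensor([image.size[::-1]])
    results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        print(f"{model.config.id2label[label.item()]}: score {score.item():.3f}, box {box.tolist()}")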
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy import unittest import numpy as np import pandas as pd from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, is_torch_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_torch, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, ) from transformers.models.tapas.modeling_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False class TapasModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = 
huber_loss_delta self.temperature = temperature self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(torch_device) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]).to(torch_device) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = torch.stack(token_type_ids, dim=2).to(torch_device) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(torch_device) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(torch_device) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(torch_device) numeric_values = floats_tensor([self.batch_size, self.seq_length]).to(torch_device) numeric_values_scale = floats_tensor([self.batch_size, self.seq_length]).to(torch_device) float_answer = floats_tensor([self.batch_size]).to(torch_device) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels).to(torch_device) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, 
max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TapasModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TapasForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TapasForQuestionAnswering(config=sqa_config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) model = TapasForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) model = TapasForQuestionAnswering(config=sqa_config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=labels, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) model = TapasForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TapasForQuestionAnswering(config=wikisql_config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, 
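# strong supervision for aggregation (WikiSQL-supervised): the aggregation labels are passed explicitly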
token_type_ids=token_type_ids, labels=labels, aggregation_labels=aggregation_labels, ) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TapasForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TapasModel, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": TapasModel, "fill-mask": TapasForMaskedLM, "table-question-answering": TapasForQuestionAnswering, "text-classification": TapasForSequenceClassification, "zero-shot": TapasForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = True test_head_masking = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["aggregation_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["numeric_values"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.float, device=torch_device, ) inputs_dict["numeric_values_scale"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.float, device=torch_device, ) inputs_dict["float_answer"] = torch.zeros( self.model_tester.batch_size, dtype=torch.float, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ 
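# token classification and language-modeling heads expect one label per token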
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(MODEL_FOR_MASKED_LM_MAPPING), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = TapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @require_tensorflow_probability def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence() def prepare_tapas_single_inputs_for_inference(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): model = TapasModel.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) expected_slice = torch.tensor( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005)) expected_slice = 
torch.tensor([[0.987518311, -0.970520139, -0.994303405]], device=torch_device) self.assertTrue(torch.allclose(outputs.pooler_output[:, :3], expected_slice, atol=0.0005)) @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass @slow def test_inference_question_answering_head_conversational(self): model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [ [ -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -9997.22461, -16.2628059, -10004.082, 15.4330549, 15.4330549, 15.4330549, -9990.42, -16.3270779, -16.3270779, -16.3270779, -16.3270779, -16.3270779, -10004.8506, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.015)) @slow def test_inference_question_answering_head_conversational_absolute_embeddings(self): model = TapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa", revision="no_reset").to( torch_device ) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [ [ -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -10014.7793, -18.8419304, -10018.0391, 17.7848816, 17.7848816, 17.7848816, -9981.02832, -16.4005489, -16.4005489, -16.4005489, -16.4005489, -16.4005489, -10013.4736, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.01)) @slow def test_inference_question_answering_head_weak_supervision(self): model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs_on_device) logits = outputs.logits expected_shape = torch.Size((2, 28)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[:, -6:], expected_slice, atol=0.4)) logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((2, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_tensor = torch.tensor( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]], device=torch_device, ) self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.001)) EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] 
EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits.detach().cpu(), outputs.logits_aggregation.detach().cpu() ) self.assertEqual(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) self.assertEqual(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def test_training_question_answering_head_weak_supervision(self): model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device) model.to(torch_device) tokenizer = self.default_tokenizer table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="pt", ) input_ids = inputs["input_ids"].to(torch_device) attention_mask = inputs["attention_mask"].to(torch_device) token_type_ids = inputs["token_type_ids"].to(torch_device) labels = inputs["labels"].to(torch_device) numeric_values = inputs["numeric_values"].to(torch_device) numeric_values_scale = inputs["numeric_values_scale"].to(torch_device) float_answer = torch.FloatTensor(float_answer).to(torch_device) with torch.no_grad(): outputs = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels, numeric_values=numeric_values, numeric_values_scale=numeric_values_scale, float_answer=float_answer, ) loss = outputs.loss expected_loss = torch.tensor(3.3527612686157227e-08, device=torch_device) self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-6)) logits = outputs.logits expected_shape = torch.Size((2, 29)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, -9:], expected_slice, atol=1e-6)) logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((2, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_slice = torch.tensor([-4.0538, 40.0304, -5.3554, 23.3965], device=torch_device) self.assertTrue(torch.allclose(logits_aggregation[1, -4:], expected_slice, atol=1e-4)) @slow def test_inference_question_answering_head_strong_supervision(self): model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised").to( torch_device ) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits expected_shape = torch.Size((1, 21)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ], device=torch_device, ) self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.02)) logits_aggregation = outputs.logits_aggregation expected_shape = torch.Size((1, 4)) self.assertEqual(logits_aggregation.shape, expected_shape) expected_tensor = torch.tensor( [[16.5659733, -3.06624889, 
-2.34152961, -0.970244825]], device=torch_device ) self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.003)) @slow def test_inference_classification_head(self): model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact").to(torch_device) tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits expected_shape = torch.Size((1, 2)) self.assertEqual(logits.shape, expected_shape) expected_tensor = torch.tensor( [[0.795137286, 9.5572]], device=torch_device ) self.assertTrue(torch.allclose(outputs.logits, expected_tensor, atol=0.05)) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch class TapasUtilitiesTest(unittest.TestCase): def _prepare_tables(self): values = torch.tensor( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=torch.tensor( [ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ] ), num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=torch.tensor( [ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ] ), num_segments=3, batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) for i in range(3): self.assertEqual(ind[0, i, 0], ind[0, i, 1]) self.assertNotEqual(ind[0, i, 0], ind[0, i, 2]) for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 and j != j_2: self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2]) for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 or j != j_2: self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2]) def test_flatten(self): _, row_index, col_index = self._prepare_tables() row_index_flat = flatten(row_index) col_index_flat = flatten(col_index) shape = [3, 4, 5] batched_index = IndexMap(indices=torch.zeros(shape).type(torch.LongTensor), num_segments=1, batch_dims=3) batched_index_flat = flatten(batched_index) np.testing.assert_array_equal( row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5] ) np.testing.assert_array_equal( col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5] ) self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape)) np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape))) def test_range_index_map(self): batch_shape = [3, 4] num_segments = 5 index = range_index_map(batch_shape, num_segments) self.assertEqual(num_segments, index.num_segments) self.assertEqual(2, 
index.batch_dims) indices = index.indices np.testing.assert_array_equal(list(indices.size()), [3, 4, 5]) for i in range(batch_shape[0]): for j in range(batch_shape[1]): np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments)) def test_reduce_sum(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_sum, _ = reduce_sum(values, row_index) col_sum, _ = reduce_sum(values, col_index) cell_sum, _ = reduce_sum(values, cell_index) np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]]) np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]]) np.testing.assert_allclose( cell_sum.numpy(), [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]], ) def test_reduce_mean(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_mean, _ = reduce_mean(values, row_index) col_mean, _ = reduce_mean(values, col_index) cell_mean, _ = reduce_mean(values, cell_index) np.testing.assert_allclose( row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]] ) np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]]) np.testing.assert_allclose( cell_mean.numpy(), [ [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0], ], ) def test_reduce_max(self): values = torch.as_tensor([2.0, 1.0, 0.0, 3.0]) index = IndexMap(indices=torch.as_tensor([0, 1, 0, 1]), num_segments=2) maximum, _ = reduce_max(values, index) np.testing.assert_array_equal(maximum.numpy(), [2, 3]) def test_reduce_sum_vectorized(self): values = torch.as_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]) index = IndexMap(indices=torch.as_tensor([[0, 0, 1]]), num_segments=2, batch_dims=0) sums, new_index = reduce_sum(values, index) np.testing.assert_allclose(sums.numpy(), [3.0, 3.0]) np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1]) np.testing.assert_array_equal(new_index.num_segments.numpy(), 2) np.testing.assert_array_equal(new_index.batch_dims, 0) def test_gather(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) sums, _ = reduce_sum(values, cell_index) cell_sum = gather(sums, cell_index) assert cell_sum.size() == values.size() np.testing.assert_allclose( cell_sum.numpy(), [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]], ) def test_gather_vectorized(self): values = torch.as_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) index = IndexMap(indices=torch.as_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1) result = gather(values, index) np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
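The segmented-tensor helpers exercised by TapasUtilitiesTest above compose into a per-cell pooling pattern; the short sketch below (not part of the test suite) shows that pattern end to end for a single 3x3 table. The import path for the PyTorch helpers is an assumption, inferred from where the TF tests further below import their counterparts.

import torch
from transformers.models.tapas.modeling_tapas import IndexMap, ProductIndexMap, gather, reduce_mean

# One 3x3 table: every token position gets a row segment id and a column segment id.
values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]])
row_index = IndexMap(indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1)
col_index = IndexMap(indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]]]), num_segments=3, batch_dims=1)

# The product index assigns one segment per (row, column) cell, as in test_product_index.
cell_index = ProductIndexMap(row_index, col_index)

# Average the values inside every cell, then broadcast the cell averages back to the token positions,
# mirroring the reduce_* / gather behaviour checked in test_reduce_mean and test_gather.
cell_mean, _ = reduce_mean(values, cell_index)
per_token_mean = gather(cell_mean, cell_index)
assert per_token_mean.shape == values.shape  # same (batch, rows, columns) shape as the input table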
codingutf8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license inference without aggregation head sqa model only returns logits inference with aggregation head wtq wikisqlsupervised model returns logits and aggregation logits training can happen in 3 main ways case 1 conversational sqa case 2 weak supervision for aggregation wtq case 3 strong supervision for aggregation wikisqlsupervised todo fix the failed tests here we prepare a single tablequestion pair to test tapas inference on here we prepare a batch of 2 tablequestion pairs to test tapas inference on here we prepare a different batch of 2 tablequestion pairs to test tapas training on ideally we want to test this with the weights of tapasintermasklmbasereset but since it s not straightforward to do this with the tf 1 implementation we test it with the weights of the wtq base model i e tapaswtqwikisqlsqaintermasklmbasereset test the sequence output test the pooled output tapasforquestionanswering has 3 possible ways of being finetuned conversational setup sqa weak supervision for aggregation wtq wikisql strong supervision for aggregation wikisqlsupervised we test all of them note that googletapasbasefinetunedsqa should correspond to tapassqaintermasklmbasereset test the logits note that googletapassmallfinetunedsqa should correspond to tapassqaintermasklmsmallreset however here we test the version with absolute position embeddings test the logits note that googletapasbasefinetunedwtq should correspond to tapaswtqwikisqlsqaintermasklmbasereset let s test on a batch test the logits test the aggregation logits test the predicted answer coordinates and aggregation indices note that googletapasbasefinetunedwtq should correspond to tapaswtqwikisqlsqaintermasklmbasereset let s test on a batch the answer should be prepared by the user test the loss test the logits on the first example test the aggregation logits on the second example note that googletapasbasefinetunedwikisqlsupervised should correspond to tapaswikisqlsqaintermasklmbasereset test the logits test the aggregation logits note that googletapasbasefinetunedtabfact should correspond to tapastabfactintermasklmbasereset test the classification logits below tests for tapas utilities which are defined in modelingtftapas py these are based on segmentedtensortest py of the original implementation url https github comgoogleresearchtapasblobmastertapasmodelssegmentedtensortest py prepares two tables both with three distinct rows the first table has two columns 1 0 2 0 3 0 2 0 0 0 1 0 1 0 3 0 4 0 the second table has three columns 1 0 2 0 3 0 2 0 0 0 1 0 1 0 3 0 4 0 returns segmentedtensors with the tables projections should give back the original indices we use np testing assertarrayequal rather than tensorflow s assertallequal we use np testing assertarrayequal rather than tensorflow s assertallequal the first and second column are identified in the first table all rows are distinct in the first table all cells are distinct in the second table we use np testing assertarrayequal rather than tensorflow s assertallequal we 
use np testing assertarrayequal rather than tensorflow s assertallequal we use np testing assertarrayequal rather than tensorflow s assertallequal we use np testing assertallclose rather than tensorflow s assertallclose we use np testing assertallclose rather than tensorflow s assertallclose we use np testing assertarrayequal rather than tensorflow s assertallequal we use np testing assertallclose rather than tensorflow s assertallclose we use np testing assertarrayequal rather than tensorflow s assertallequal compute sums and then gather the result should have the same shape as the original table and each element should contain the sum the values in its cell we use np testing assertarrayequal rather than tensorflow s assertallequal we use np testing assertarrayequal rather than tensorflow s assertallequal
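Since the notes above distinguish the three fine-tuning set-ups mainly by how the question-answering head is configured, a small configuration sketch may help. It mirrors the sqa_config / wikisql_config variants built in create_and_check_for_question_answering further below and is illustrative rather than an official recipe.

from transformers import TapasConfig

# case 1: conversational (SQA) - cell selection only, no aggregation head
sqa_config = TapasConfig(num_aggregation_labels=0, use_answer_as_supervision=False)

# case 2: weak supervision for aggregation (WTQ) - the aggregation operator is learned from the answer
wtq_config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)

# case 3: strong supervision for aggregation (WikiSQL-supervised) - aggregation_labels are supplied directly
wikisql_config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=False)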
from __future__ import annotations import copy import unittest import numpy as np import pandas as pd from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, TapasTokenizer, is_tf_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, ) from transformers.models.tapas.modeling_tf_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) class TFTapasModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = huber_loss_delta self.temperature = temperature 
self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = tf.stack(token_type_ids, axis=2) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) numeric_values = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) numeric_values_scale = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) float_answer = ids_tensor([self.batch_size], vocab_size=2, dtype=tf.float32) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, 
allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs.pop("attention_mask") result = model(inputs) inputs.pop("token_type_ids") result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": token_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TFTapasForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "labels": sequence_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "numeric_values": numeric_values, "numeric_values_scale": numeric_values_scale, "float_answer": float_answer, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=wikisql_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "aggregation_labels": aggregation_labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tensorflow_probability @require_tf class TFTapasModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFTapasModel, TFTapasForMaskedLM, TFTapasForSequenceClassification, TFTapasForQuestionAnswering, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFTapasModel, "fill-mask": TFTapasForMaskedLM, "text-classification": TFTapasForSequenceClassification, "zero-shot": TFTapasForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) inputs_dict["aggregation_labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["numeric_values"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["numeric_values_scale"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["float_answer"] = tf.zeros(self.model_tester.batch_size, dtype=tf.float32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), 
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def setUp(self): self.model_tester = TFTapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_dataset_conversion(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_keras_fit(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_loss_computation(self): pass def prepare_tapas_single_inputs_for_inference(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @require_tensorflow_probability @require_tf class TFTapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): model = TFTapasModel.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) expected_slice = tf.constant( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ] ) tf.debugging.assert_near(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005) expected_slice = tf.constant([[0.987518311, -0.970520139, -0.994303405]]) tf.debugging.assert_near(outputs.pooler_output[:, :3], expected_slice, atol=0.0005) @unittest.skip(reason="Model not available yet") def 
test_inference_masked_lm(self): pass @slow def test_inference_question_answering_head_conversational(self): model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -16.262585, -10004.089, 15.435196, 15.435196, 15.435196, -9990.443, -16.327433, -16.327433, -16.327433, -16.327433, -16.327433, -10004.84, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.015) @slow def test_inference_question_answering_head_conversational_absolute_embeddings(self): model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -18.369339, -10014.692, 17.730324, 17.730324, 17.730324, -9984.974, -18.322773, -18.322773, -18.322773, -18.322773, -18.322773, -10007.267, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.01) @slow def test_inference_question_answering_head_weak_supervision(self): model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.TensorShape([2, 28]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ] ) tf.debugging.assert_near(logits[:, -6:], expected_slice, atol=0.4) logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]] ) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.001) EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits, outputs.logits_aggregation ) tf.debugging.assert_equal(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) tf.debugging.assert_equal(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def test_training_question_answering_head_weak_supervision(self): model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries, answer_coordinates, answer_text, float_answer = 
prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="tf", ) float_answer = tf.constant(float_answer, dtype=tf.float32) outputs = model( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], labels=inputs["labels"], numeric_values=inputs["numeric_values"], numeric_values_scale=inputs["numeric_values_scale"], float_answer=float_answer, ) loss = outputs.loss expected_loss = tf.constant(3.3527612686157227e-08) tf.debugging.assert_near(loss, expected_loss, atol=1e-6) logits = outputs.logits expected_shape = tf.TensorShape([2, 29]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ] ) tf.debugging.assert_near(logits[0, -9:], expected_slice, atol=1e-6) logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([-4.0538, 40.0304, -5.3554, 23.3965]) tf.debugging.assert_near(logits_aggregation[1, -4:], expected_tensor, atol=1e-4) @slow def test_inference_question_answering_head_strong_supervision(self): model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.02) logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([1, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([[16.5659733, -3.06624889, -2.34152961, -0.970244825]]) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.003) @slow def test_inference_classification_head(self): model = TFTapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) logits = outputs.logits expected_shape = tf.TensorShape([1, 2]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant([[0.795137286, 9.5572]]) tf.debugging.assert_near(logits, expected_slice, atol=0.05) @require_tensorflow_probability class TFTapasUtilsTest(unittest.TestCase): def _prepare_tables(self): values = tf.constant( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=[ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ], num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=[ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ], num_segments=3, 
batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) for i in range(3): self.assertEqual(ind[0, i, 0], ind[0, i, 1]) self.assertNotEqual(ind[0, i, 0], ind[0, i, 2]) for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 and j != j_2: self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2]) for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 or j != j_2: self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2]) def test_flatten(self): _, row_index, col_index = self._prepare_tables() row_index_flat = flatten(row_index) col_index_flat = flatten(col_index) shape = [3, 4, 5] batched_index = IndexMap(indices=tf.zeros(shape, dtype=tf.int32), num_segments=1, batch_dims=3) batched_index_flat = flatten(batched_index) np.testing.assert_array_equal( row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5] ) np.testing.assert_array_equal( col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5] ) self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape)) np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape))) def test_range_index_map(self): batch_shape = [3, 4] num_segments = 5 index = range_index_map(batch_shape, num_segments) self.assertEqual(num_segments, index.num_segments) self.assertEqual(2, index.batch_dims) indices = index.indices np.testing.assert_array_equal(list(indices.shape), [3, 4, 5]) for i in range(batch_shape[0]): for j in range(batch_shape[1]): np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments)) def test_reduce_sum(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_sum, _ = reduce_sum(values, row_index) col_sum, _ = reduce_sum(values, col_index) cell_sum, _ = reduce_sum(values, cell_index) np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]]) np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]]) np.testing.assert_allclose( cell_sum.numpy(), [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]], ) def test_reduce_mean(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_mean, _ = reduce_mean(values, row_index) col_mean, _ = reduce_mean(values, col_index) cell_mean, _ = reduce_mean(values, cell_index) np.testing.assert_allclose( row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]] ) np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]]) np.testing.assert_allclose( cell_mean.numpy(), [ [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0], ], ) def 
test_reduce_max(self): values = tf.convert_to_tensor([2.0, 1.0, 0.0, 3.0]) index = IndexMap(indices=tf.convert_to_tensor([0, 1, 0, 1]), num_segments=2) maximum, _ = reduce_max(values, index) np.testing.assert_array_equal(maximum.numpy(), [2, 3]) def test_reduce_sum_vectorized(self): values = tf.convert_to_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]) index = IndexMap(indices=tf.convert_to_tensor([0, 0, 1]), num_segments=2, batch_dims=0) sums, new_index = reduce_sum(values, index) np.testing.assert_allclose(sums.numpy(), [[3.0, 5.0, 7.0], [3.0, 4.0, 5.0]]) np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1]) np.testing.assert_array_equal(new_index.num_segments.numpy(), 2) np.testing.assert_array_equal(new_index.batch_dims, 0) def test_gather(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) sums, _ = reduce_sum(values, cell_index) cell_sum = gather(sums, cell_index) assert cell_sum.shape == values.shape np.testing.assert_allclose( cell_sum.numpy(), [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]], ) def test_gather_vectorized(self): values = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) index = IndexMap(indices=tf.convert_to_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1) result = gather(values, index) np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
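For reference, the integration tests above reduce to the following end-to-end flow for table question answering. This is a condensed sketch of what those tests already exercise (the checkpoint name, tokenizer calls and convert_logits_to_predictions usage are taken from them), not an additional test.

import pandas as pd
from transformers import TapasTokenizer, TFTapasForQuestionAnswering

tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")

table = pd.DataFrame.from_dict({"Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"]})
inputs = tokenizer(table=table, queries=["Which footballer is 33 years old?"], padding="longest", return_tensors="tf")
outputs = model(**inputs)

# Map the token-level logits (and aggregation logits) back to cell coordinates and an aggregation operator,
# exactly as test_inference_question_answering_head_weak_supervision does above.
predicted_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions(
    inputs, outputs.logits, outputs.logits_aggregation
)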
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch TimeSeriesTransformer model. """
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import ( TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, ) from transformers.models.time_series_transformer.modeling_time_series_transformer import ( TimeSeriesTransformerDecoder, TimeSeriesTransformerEncoder, ) @require_torch class TimeSeriesTransformerModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], ): self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = context_length self.decoder_seq_length = prediction_length def get_config(self): return TimeSeriesTransformerConfig( encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_real_features=1, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], ) def prepare_time_series_transformer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) static_real_features = floats_tensor([self.batch_size, 1]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": static_categorical_features, "static_real_features": static_real_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": 
future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_time_series_transformer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TimeSeriesTransformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TimeSeriesTransformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, _, _, _ = model.create_network_inputs(**inputs_dict) enc_input = transformer_inputs[:, : config.context_length, ...] dec_input = transformer_inputs[:, config.context_length :, ...] encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TimeSeriesTransformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class TimeSeriesTransformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TimeSeriesTransformerModel, TimeSeriesTransformerForPrediction) if is_torch_available() else () ) all_generative_model_classes = (TimeSeriesTransformerForPrediction,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimeSeriesTransformerModel} if is_torch_available() else {} is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False test_model_common_attributes = False def setUp(self): self.model_tester = TimeSeriesTransformerModelTester(self) self.config_tester = ConfigTester( self, config_class=TimeSeriesTransformerConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_resize_tokens_embeddings(self): pass def test_model_main_input_name(self): model_signature = inspect.signature(getattr(TimeSeriesTransformerModel, "forward")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(TimeSeriesTransformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] expected_arg_names.extend( [ "future_observed_mask", "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] if "future_observed_mask" in arg_names else [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) out_len = len(outputs) correct_outlen = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_seq_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_seq_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder 
else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @parameterized.expand( [ (1, 5, [1]), (1, 5, [1, 10, 15]), (1, 5, [3, 6, 9, 10]), (2, 5, [1, 2, 7]), (2, 5, [2, 3, 4, 6]), (4, 5, [1, 5, 9, 11]), (4, 5, [7, 8, 13, 14]), ], ) def test_create_network_inputs(self, prediction_length, context_length, lags_sequence): history_length = max(lags_sequence) + context_length config = TimeSeriesTransformerConfig( prediction_length=prediction_length, context_length=context_length, lags_sequence=lags_sequence, scaling=False, num_parallel_samples=10, num_static_categorical_features=1, cardinality=[1], embedding_dimension=[2], num_static_real_features=1, ) model = TimeSeriesTransformerModel(config) batch = { "static_categorical_features": torch.tensor([[0]], dtype=torch.int64), "static_real_features": torch.tensor([[0.0]], dtype=torch.float32), "past_time_features": torch.arange(history_length, dtype=torch.float32).view(1, history_length, 1), "past_values": torch.arange(history_length, dtype=torch.float32).view(1, history_length), "past_observed_mask": torch.arange(history_length, dtype=torch.float32).view(1, history_length), } batch["future_time_features"] = torch.arange(history_length, history_length + 1, dtype=torch.float32).view( 1, 1, 1 ) transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) self.assertTrue((scale == 1.0).all()) assert (loc == 0.0).all() ref = torch.arange(max(lags_sequence), history_length, dtype=torch.float32) for idx, lag in enumerate(lags_sequence): assert torch.isclose(ref - lag, transformer_inputs[0, :, idx]).all() batch["future_time_features"] = torch.arange( history_length, history_length + prediction_length, dtype=torch.float32 ).view(1, prediction_length, 1) batch["future_values"] = torch.arange( history_length, history_length + prediction_length, dtype=torch.float32 ).view(1, prediction_length) transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) assert (scale == 1.0).all() assert (loc == 0.0).all() ref = torch.arange(max(lags_sequence), history_length + prediction_length, dtype=torch.float32) for idx, lag in enumerate(lags_sequence): assert torch.isclose(ref - lag, transformer_inputs[0, :, idx]).all() batch.pop("future_values") transformer_inputs, loc, scale, _ = model.create_network_inputs(**batch) lagged_sequence = model.get_lagged_subsequences( sequence=batch["past_values"], subsequences_length=1, shift=1, ) assert transformer_inputs[0, ..., 0][-1] + 1 == lagged_sequence[0, ..., 0][-1] future_values = torch.arange(history_length, history_length + prediction_length, dtype=torch.float32).view( 1, prediction_length ) assert lagged_sequence[0, ..., 0][-1] + lags_sequence[0] == 
future_values[0, ..., 0] @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() def prepare_batch(filename="train-batch.pt"): file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset") batch = torch.load(file, map_location=torch_device) return batch @require_torch @slow class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase): def test_inference_no_head(self): model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly").to( torch_device ) batch = prepare_batch() with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], ).last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.8196, -1.5131, 1.4620], [1.1268, -1.3238, 1.5997], [1.5098, -1.0715, 1.7359]], device=torch_device ) self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], future_time_features=batch["future_time_features"], ).encoder_last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-1.2957, -1.0280, -0.6045], [-0.7017, -0.8193, -0.3717], [-1.0449, -0.8149, 0.1405]], device=torch_device ) self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): model = TimeSeriesTransformerForPrediction.from_pretrained( "huggingface/time-series-transformer-tourism-monthly" ).to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], static_real_features=batch["static_real_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([2825.2749, 3584.9207, 6763.9951], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
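The lag-feature assertions in test_create_network_inputs above all reduce to one invariant: for every lag in lags_sequence, the model stacks a copy of the past series shifted back by that lag, so feature column idx at position t equals past_values[t - lag]. The sketch below is a simplified, hypothetical stand-in for the model's get_lagged_subsequences (not the library implementation); it only illustrates why the ref - lag checks hold.

import torch


def lagged_subsequences(sequence, lags, subsequences_length):
    # sequence: (batch, sequence_length); returns (batch, subsequences_length, len(lags)).
    # Column idx holds the series shifted back by lags[idx], i.e. value at step t == sequence[t - lag].
    lagged = []
    for lag in lags:
        begin = sequence.shape[1] - lag - subsequences_length
        end = sequence.shape[1] - lag
        lagged.append(sequence[:, begin:end])
    return torch.stack(lagged, dim=-1)


lags = [1, 2, 3]
history_length = max(lags) + 5  # max(lags_sequence) + context_length, as in get_config above
past_values = torch.arange(history_length, dtype=torch.float32).view(1, -1)
features = lagged_subsequences(past_values, lags, subsequences_length=5)
ref = torch.arange(max(lags), history_length, dtype=torch.float32)
for idx, lag in enumerate(lags):
    # mirrors the assertion pattern used in test_create_network_inputs
    assert torch.equal(features[0, :, idx], ref - lag)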
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch TimeSformer model. """
import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class TimesformerModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.attention_type = attention_type self.initializer_range = initializer_range self.scope = scope self.num_labels = num_labels self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames) * self.num_patches_per_frame + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = TimesformerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = TimesformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = TimesformerForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) expected_shape = torch.Size((self.batch_size, 
self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TimesformerModelTester(self) self.config_tester = ConfigTester( self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37 ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TimesformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length num_frames = self.model_tester.num_frames inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = 
True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class TimesformerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video[:8], return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
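The attention and hidden-state shape checks above all follow from the same token count: each frame contributes (image_size // patch_size) ** 2 patch tokens and a single CLS token is prepended, so per-frame attention maps cover seq_len // num_frames + 1 tokens. A quick standalone sanity check with the tester defaults (illustrative only, not part of the test suite):

image_size, patch_size, num_frames = 10, 2, 2  # TimesformerModelTester defaults
num_patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * num_patches_per_frame + 1  # 51, matches self.seq_length in the tester
attn_tokens = seq_length // num_frames + 1  # 26 = patches per frame + CLS, as asserted above
assert (num_patches_per_frame, seq_length, attn_tokens) == (25, 51, 26)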
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note on the defaults exercised below: out_indices is set to the last layer by default.
# For timm models the number of layers is not known in advance, so it is set to (-1,),
# whereas for transformers models it is set to [len(stage_names) - 1] (kept for
# backward compatibility).
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def create_and_check_model(self, config, pixel_values): model = TimmBackbone(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): self.config_class = PretrainedConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester(self, config_class=self.config_class, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) 
self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) self.assertEqual(timm_model.out_indices, (-1,)) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip("TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip("TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds") def test_model_common_attributes(self): pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip("model weights aren't tied in TimmBackbone.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip("TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass @unittest.skip("Safetensors is not supported by timm.") def test_can_use_safetensors(self): pass @unittest.skip("Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def test_create_from_modified_config(self): 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
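The TimmBackbone assertions above compare a timm-backed backbone against its transformers counterpart and exercise `AutoBackbone.from_pretrained` with explicit `out_indices`. As a rough usage sketch of that API (the `"resnet18"` checkpoint name is a placeholder assumption, not one pinned by these tests):

# Minimal sketch of the AutoBackbone API exercised above; "resnet18" is an assumed
# timm checkpoint, and the exact feature-map shapes depend on the chosen backbone.
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
# One feature map per requested stage; channel counts line up with backbone.channels
for feature_map, channels in zip(outputs.feature_maps, backbone.channels):
    assert feature_map.shape[1] == channels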
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
"""Testing suite for the PyTorch TrOCR model."""
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class TrOCRStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = TrOCRConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return (config, input_ids, attention_mask, lm_labels) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = TrOCRDecoder(config=config).to(torch_device).eval() input_ids = input_ids[:2] input_ids[input_ids == 0] += 1 outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1 next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), 
output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, lm_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} fx_compatible = True test_pruning = False def setUp(self): self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=TrOCRConfig) def test_inputs_embeds(self): pass def test_save_load_fast_init_from_base(self): pass def test_save_load_fast_init_to_base(self): pass def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass
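The `create_and_check_decoder_model_past` helper above checks that cached incremental decoding reproduces a full forward pass. A standalone sketch of the same equivalence check, using small illustrative config values rather than the tester's exact defaults:

# Hedged sketch of the cache-equivalence check; config sizes are arbitrary small values.
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoder

config = TrOCRConfig(vocab_size=99, d_model=16, decoder_layers=2, decoder_ffn_dim=32, decoder_attention_heads=4)
decoder = TrOCRDecoder(config=config).eval()

input_ids = torch.randint(1, config.vocab_size, (1, 5))
next_token = torch.randint(1, config.vocab_size, (1, 1))

with torch.no_grad():
    past_key_values = decoder(input_ids, use_cache=True).past_key_values
    full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
    incremental = decoder(next_token, past_key_values=past_key_values).last_hidden_state

# The last position of the full pass should match the single cached step
assert torch.allclose(full[:, -1], incremental[:, 0], atol=1e-3)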
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
"""Testing suite for the TVLT feature extraction."""
import itertools import random import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class TvltFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.spectrogram_length = spectrogram_length self.feature_size = feature_size self.num_audio_channels = num_audio_channels self.hop_length = hop_length self.chunk_length = chunk_length self.sampling_rate = sampling_rate def prepare_feat_extract_dict(self): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = TvltFeatureExtractor def setUp(self): self.feat_extract_tester = TvltFeatureExtractionTester(self) def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "spectrogram_length")) self.assertTrue(hasattr(feature_extractor, "feature_size")) self.assertTrue(hasattr(feature_extractor, "num_audio_channels")) self.assertTrue(hasattr(feature_extractor, "hop_length")) self.assertTrue(hasattr(feature_extractor, "chunk_length")) self.assertTrue(hasattr(feature_extractor, "sampling_rate")) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == 
feature_extractor.num_channels) encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) encoded_audios = feature_extractor( np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True ).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values self.assertTrue(encoded_audios.ndim == 4) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): input_speech = self._load_datasamples(1) feature_extractor = TvltFeatureExtractor() audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values self.assertEquals(audio_values.shape, (1, 1, 192, 128)) expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]]) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
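As a quick usage sketch of the feature extractor exercised above (the one-second random waveform is synthetic, purely for illustration):

# Minimal sketch: run TvltFeatureExtractor on a synthetic one-second waveform.
import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)  # fake audio at the default 44.1 kHz rate
audio_values = feature_extractor(waveform, return_tensors="np", sampling_rate=44100).audio_values
# A 4-D array of spectrogram patches: (batch, channels, time, feature_size)
print(audio_values.shape)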
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
"""Testing suite for the TVLT image processor."""
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvltImageProcessor def prepare_video(image_processor_tester, width=10, height=10, numpify=False, torchify=False): video = [] for i in range(image_processor_tester.num_frames): video.append(np.random.randint(255, size=(image_processor_tester.num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] if torchify: video = [torch.from_numpy(frame) for frame in video] return video def prepare_video_inputs(image_processor_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(image_processor_tester.batch_size): if equal_resolution: width = height = image_processor_tester.max_resolution else: width, height = np.random.choice( np.arange(image_processor_tester.min_resolution, image_processor_tester.max_resolution), 2 ) video = prepare_video( image_processor_tester=image_processor_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs class TvltImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_frames=4, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_center_crop=True, crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_center_crop = do_center_crop self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class TvltImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvltImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = TvltImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "do_center_crop")) self.assertTrue(hasattr(image_processor, "size")) def test_call_pil(self): image_processor = self.image_processing_class(**self.image_processor_dict) 
video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): image_processor = self.image_processing_class(**self.image_processor_dict) video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy_4_channels(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processor( video_inputs[0], return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1 ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) encoded_videos = image_processor( video_inputs, return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1 ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): image_processor = self.image_processing_class(**self.image_processor_dict) video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
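A matching usage sketch for the image processor (the four-frame random video stands in for real data):

# Minimal sketch: run TvltImageProcessor on a fake 4-frame, channels-first RGB video.
import numpy as np
from transformers import TvltImageProcessor

image_processor = TvltImageProcessor()
video = [np.random.randint(0, 255, size=(3, 64, 64), dtype=np.uint8) for _ in range(4)]
pixel_values = image_processor(video, return_tensors="pt").pixel_values
# Shape is (batch, num_frames, num_channels, height, width) after resize and center crop
print(pixel_values.shape)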
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
"""Testing suite for the PyTorch TVLT model."""
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import ( TvltConfig, is_datasets_available, is_speech_available, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn as nn from transformers import TvltForAudioVisualClassification, TvltForPreTraining, TvltModel from transformers.models.tvlt.modeling_tvlt import TVLT_PRETRAINED_MODEL_ARCHIVE_LIST if is_datasets_available(): from datasets import load_dataset if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor class TvltModelTester: def __init__( self, parent, batch_size=2, image_size=32, spectrogram_length=32, frequency_length=16, image_patch_size=[2, 2], audio_patch_size=[2, 2], num_image_channels=3, num_audio_channels=1, num_frames=2, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, qkv_bias=True, use_mean_pooling=True, decoder_num_attention_heads=4, decoder_hidden_size=32, decoder_num_hidden_layers=2, decoder_intermediate_size=128, image_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type="frame-level", task_matching=True, task_mae=True, num_labels=1, is_training=True, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.spectrogram_length = spectrogram_length self.frequency_length = frequency_length self.image_patch_size = image_patch_size self.audio_patch_size = audio_patch_size self.num_image_channels = num_image_channels self.num_audio_channels = num_audio_channels self.num_frames = num_frames self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_mean_pooling = use_mean_pooling self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.image_mask_ratio = image_mask_ratio self.audio_mask_ratio = audio_mask_ratio self.task_matching = task_matching self.task_mae = task_mae self.num_labels = num_labels self.expected_pixel_seq_len = (self.image_size // self.image_patch_size[0]) ** 2 * self.num_frames self.expected_audio_seq_len = (self.spectrogram_length // self.audio_patch_size[0]) * ( self.frequency_length // self.audio_patch_size[1] ) self.expected_seq_len = self.expected_pixel_seq_len + self.expected_audio_seq_len + 1 self.image_mae_output_dim = image_patch_size[0] ** 2 * num_image_channels self.audio_mae_output_dim = audio_patch_size[0] * audio_patch_size[1] * num_audio_channels self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, 
self.num_frames, self.num_image_channels, self.image_size, self.image_size] ) audio_values = floats_tensor( [self.batch_size, self.num_audio_channels, self.spectrogram_length, self.frequency_length] ) pixel_mask = floats_tensor([self.batch_size, self.expected_pixel_seq_len]) audio_mask = floats_tensor([self.batch_size, self.expected_audio_seq_len]) config = self.get_config() return (config, pixel_values, audio_values, pixel_mask, audio_mask) def prepare_config_and_inputs_for_pretraining(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_image_channels, self.image_size, self.image_size] ) audio_values = floats_tensor( [self.batch_size, self.num_audio_channels, self.spectrogram_length, self.frequency_length] ) pixel_mask = floats_tensor([self.batch_size, self.expected_pixel_seq_len]) audio_mask = floats_tensor([self.batch_size, self.expected_audio_seq_len]) pixel_values_mixed = floats_tensor( [self.batch_size, self.num_frames, self.num_image_channels, self.image_size, self.image_size] ) pixel_mask_mixed = floats_tensor([self.batch_size, self.expected_pixel_seq_len]) labels = floats_tensor([self.batch_size]) config = self.get_config() return ( config, pixel_values, audio_values, pixel_mask, audio_mask, pixel_values_mixed, pixel_mask_mixed, labels, ) def get_config(self): return TvltConfig( image_size=self.image_size, spectrogram_length=self.spectrogram_length, frequency_length=self.frequency_length, image_patch_size=self.image_patch_size, audio_patch_size=self.audio_patch_size, num_image_channels=self.num_image_channels, num_audio_channels=self.num_audio_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, use_mean_pooling=self.use_mean_pooling, decoder_num_attention_heads=self.decoder_num_attention_heads, decoder_hidden_size=self.decoder_hidden_size, decoder_num_hidden_layers=self.decoder_num_hidden_layers, decoder_intermediate_size=self.decoder_intermediate_size, image_mask_ratio=self.image_mask_ratio, audio_mask_ratio=self.audio_mask_ratio, task_matching=self.task_matching, task_mae=self.task_mae, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, audio_values, pixel_mask, audio_mask): model = TvltModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, audio_values, pixel_mask=pixel_mask, audio_mask=audio_mask) result = model(pixel_values, audio_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) ) def create_and_check_for_audiovisual_classification( self, config, pixel_values, audio_values, pixel_mask, audio_mask ): model = TvltForAudioVisualClassification(config=config) model.to(torch_device) model.eval() result = model(pixel_values, audio_values, pixel_mask=pixel_mask, audio_mask=audio_mask) result = model(pixel_values, audio_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_pretraining( self, config, pixel_values, audio_values, pixel_mask, audio_mask, pixel_values_mixed, pixel_mask_mixed, labels, ): model = TvltForPreTraining(config=config) model.to(torch_device) model.train() 
result = model( pixel_values, audio_values, pixel_mask, audio_mask, pixel_values_mixed=pixel_values_mixed, pixel_mask_mixed=pixel_mask_mixed, labels=labels, ) self.parent.assertEqual( result.pixel_logits.shape, (self.batch_size, self.expected_pixel_seq_len, self.image_mae_output_dim) ) self.parent.assertEqual( result.audio_logits.shape, (self.batch_size, self.expected_audio_seq_len, self.audio_mae_output_dim) ) self.parent.assertEqual(result.matching_logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_pretraining_inference( self, config, pixel_values, audio_values, pixel_mask, audio_mask, pixel_values_mixed, pixel_mask_mixed, labels, ): model = TvltForPreTraining(config=config) model.to(torch_device) model.eval() result = model( pixel_values, audio_values, pixel_mask, audio_mask, pixel_values_mixed=pixel_values_mixed, pixel_mask_mixed=pixel_mask_mixed, labels=labels, ) if result.pixel_logits is not None: self.parent.assertEqual( result.pixel_logits.shape, (self.batch_size, self.expected_pixel_seq_len, self.image_mae_output_dim) ) if result.audio_logits is not None: self.parent.assertEqual( result.audio_logits.shape, (self.batch_size, self.expected_audio_seq_len, self.audio_mae_output_dim) ) self.parent.assertEqual(result.matching_logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, pixel_values, audio_values, pixel_mask, audio_mask) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "audio_values": audio_values, "pixel_mask": pixel_mask, "audio_mask": audio_mask, } return config, inputs_dict def prepare_pixel_values(self): return floats_tensor( [self.batch_size, self.num_frames, self.num_image_channels, self.image_size, self.image_size] ) def prepare_audio_values(self): return floats_tensor( [self.batch_size, self.num_audio_channels, self.spectrogram_length, self.frequency_length] ) @require_torch class TvltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TvltModel, TvltForPreTraining, TvltForAudioVisualClassification) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": TvltModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_headmasking = False test_torchscript = False test_resize_embeddings = False main_input_name = "pixel_values" def _prepare_for_class(self, inputs_dict, model_class, return_labels=True): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class.__name__ == "TvltForAudioVisualClassification": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size,), dtype=torch.long, device=torch_device ) elif model_class.__name__ == "TvltForPreTraining": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size,), dtype=torch.float, device=torch_device ) inputs_dict["pixel_values_mixed"] = torch.zeros( ( self.model_tester.batch_size, self.model_tester.num_frames, self.model_tester.num_image_channels, self.model_tester.image_size, self.model_tester.image_size, ), dtype=torch.float, device=torch_device, ) inputs_dict["pixel_mask_mixed"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.expected_pixel_seq_len), dtype=torch.float, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = TvltModelTester(self) self.config_tester = ConfigTester(self, config_class=TvltConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() 
@unittest.skip(reason="TVLT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) input_embeddings = model.get_input_embeddings() self.assertIsInstance(input_embeddings, (tuple)) for embedding in input_embeddings: self.assertIsInstance(embedding, (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "audio_values"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_audiovisual_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_audiovisual_classification(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) self.model_tester.create_and_check_for_pretraining_inference(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TVLT_PRETRAINED_MODEL_ARCHIVE_LIST: model = TvltModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[1:]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class) for k, v in inputs.items(): print(k, v.shape) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[1:]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class) loss = model(**inputs).loss loss.backward() def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes[2:]: seq_len = self.model_tester.expected_seq_len inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[2:]: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def prepare_video(num_frames=8): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file)[:num_frames] return list(video) def prepare_audio(num_samples=1): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch @require_vision class TvltModelIntegrationTest(unittest.TestCase): @cached_property def default_processors(self): return ( TvltImageProcessor() if is_vision_available() else None, TvltFeatureExtractor(), ) def test_inference_for_base_model(self): model = TvltModel.from_pretrained("ZinengTang/tvlt-base").to(torch_device) image_processor, audio_feature_extractor = self.default_processors video = prepare_video() audio = prepare_audio() video_inputs = image_processor(video, return_tensors="pt").to(torch_device) audio_inputs = audio_feature_extractor(audio, return_tensors="pt").to(torch_device) inputs = {} inputs.update(video_inputs) inputs.update(audio_inputs) with torch.no_grad(): outputs = model(**inputs) expected_last_hidden_state_slice = torch.tensor([[-0.0186, -0.0691], [0.0242, -0.0398]], device=torch_device) self.assertTrue( torch.allclose(outputs.last_hidden_state[:, :2, :2], expected_last_hidden_state_slice, atol=1e-4) ) def test_inference_for_pretraining(self): model = TvltForPreTraining.from_pretrained("ZinengTang/tvlt-base").to(torch_device) image_processor, audio_feature_extractor = self.default_processors video = prepare_video() video_mixed = prepare_video() audio = prepare_audio() video_inputs = image_processor(video, return_tensors="pt", mask_pixel=True).to(torch_device) video_mixed_inputs = image_processor(video_mixed, is_mixed=True, return_tensors="pt").to(torch_device) audio_inputs = 
audio_feature_extractor(audio, return_tensors="pt", mask_audio=True).to(torch_device) labels = torch.tensor([[0.0]], device=torch_device) inputs = {} inputs.update(video_inputs) inputs.update(video_mixed_inputs) inputs.update(audio_inputs) inputs.update({"labels": labels}) with torch.no_grad(): outputs = model(**inputs) expected_pixel_logits_shape = torch.Size([1, 1568, 768]) expected_audio_logits_shape = torch.Size([1, 96, 256]) expected_matching_logits_shape = torch.Size([1, 1]) if outputs.pixel_logits is not None: self.assertEqual(outputs.pixel_logits.shape, expected_pixel_logits_shape) if outputs.audio_logits is not None: self.assertEqual(outputs.audio_logits.shape, expected_audio_logits_shape) self.assertEqual(outputs.matching_logits.shape, expected_matching_logits_shape)
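For reference, a condensed sketch of the same inference path exercised by test_inference_for_base_model above. It reuses the prepare_video() / prepare_audio() helpers defined earlier in this file; the checkpoint name is taken from the tests and CPU execution is assumed.

import torch
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltModel

model = TvltModel.from_pretrained("ZinengTang/tvlt-base").eval()
image_processor = TvltImageProcessor()
audio_feature_extractor = TvltFeatureExtractor()

video = prepare_video()   # 8 frames of the hf-internal-testing spaghetti video
audio = prepare_audio()   # 1 sample of the LibriSpeech dummy set

inputs = {}
inputs.update(image_processor(video, return_tensors="pt"))
inputs.update(audio_feature_extractor(audio, return_tensors="pt"))

with torch.no_grad():
    # same tensor whose [: , :2, :2] slice is checked in the integration test
    last_hidden_state = model(**inputs).last_hidden_state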
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class TvltProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "ZinengTang/tvlt-base" self.tmpdirname = tempfile.mkdtemp() def get_image_processor(self, **kwargs): return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): image_processor = self.get_image_processor() feature_extractor = self.get_feature_extractor() processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = TvltProcessor.from_pretrained(self.tmpdirname) self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor) self.assertIsInstance(processor.image_processor, TvltImageProcessor) def test_feature_extractor(self): image_processor = self.get_image_processor() feature_extractor = self.get_feature_extractor() processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor) audio = np.ones([12000]) audio_dict = feature_extractor(audio, return_tensors="np") input_processor = processor(audio=audio, return_tensors="np") for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2) def test_image_processor(self): image_processor = self.get_image_processor() feature_extractor = self.get_feature_extractor() processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor) images = np.ones([3, 224, 224]) image_dict = image_processor(images, return_tensors="np") input_processor = processor(images=images, return_tensors="np") for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2) def test_processor(self): image_processor = self.get_image_processor() feature_extractor = self.get_feature_extractor() processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor) audio = np.ones([12000]) images = np.ones([3, 224, 224]) inputs = processor(audio=audio, images=images) self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"]) with pytest.raises(ValueError): processor() def test_model_input_names(self): image_processor = self.get_image_processor() feature_extractor = self.get_feature_extractor() processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg="`processor` and `image_processor`+`feature_extractor` model input names do not match", )
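A short usage sketch of the dispatching behaviour verified by test_processor above: TvltProcessor forwards audio to TvltFeatureExtractor and images to TvltImageProcessor and merges their outputs. The checkpoint name comes from setUp and the dummy arrays mirror the ones used in the tests.

import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

image_processor = TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base")
feature_extractor = TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base")
processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
print(list(inputs.keys()))  # ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask']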
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.image_transforms import PaddingMode from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor class TvpImageProcessingTester(unittest.TestCase): def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = {"longest_edge": 40}, do_center_crop: bool = False, crop_size: Dict[str, int] = None, do_rescale: bool = False, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: Dict[str, int] = {"height": 80, "width": 80}, fill: int = None, pad_mode: PaddingMode = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], batch_size=2, min_resolution=40, max_resolution=80, num_channels=3, num_frames=2, ): self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self.fill = fill self.pad_mode = pad_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_frames = num_frames def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "do_center_crop": self.do_center_crop, "do_pad": self.do_pad, "pad_size": self.pad_size, } def get_expected_values(self, image_inputs, batched=False): if not batched: return (int(self.pad_size["height"]), int(self.pad_size["width"])) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvpImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = TvpImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "pad_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"longest_edge": 40}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12}) self.assertEqual(image_processor.size, {"longest_edge": 12}) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy_4_channels(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width 
= self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
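As a quick illustration of the padding behaviour the shape assertions above rely on, here is a minimal sketch (not part of the test suite; values chosen to match the tester defaults). With do_pad=True and pad_size={"height": 80, "width": 80}, every frame is padded to 80x80 regardless of its input resolution, so the expected height and width depend only on pad_size.

import numpy as np
from transformers import TvpImageProcessor

image_processor = TvpImageProcessor(
    do_resize=True,
    size={"longest_edge": 40},
    do_pad=True,
    pad_size={"height": 80, "width": 80},
)
# one dummy video of 2 channels-first frames, resolution deliberately not 80x80
video = [np.random.randint(0, 256, (3, 48, 64), dtype=np.uint8) for _ in range(2)]
pixel_values = image_processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected under these settings: (1, 2, 3, 80, 80)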
import unittest from transformers import ResNetConfig, TvpConfig from transformers.testing_utils import require_torch, require_vision, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TvpForVideoGrounding, TvpModel if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor class TVPModelTester: def __init__( self, parent, batch_size=1, seq_length=2, alpha=1.0, beta=0.1, visual_prompter_type="framepad", visual_prompter_apply="replace", num_frames=2, max_img_size=448, visual_prompt_size=96, vocab_size=100, hidden_size=32, intermediate_size=32, num_hidden_layers=2, num_attention_heads=4, max_position_embeddings=30, max_grid_col_position_embeddings=30, max_grid_row_position_embeddings=30, hidden_dropout_prob=0.1, hidden_act="gelu", layer_norm_eps=1e-12, initializer_range=0.02, pad_token_id=0, type_vocab_size=2, attention_probs_dropout_prob=0.1, ): self.parent = parent self.batch_size = batch_size self.input_id_length = seq_length self.seq_length = seq_length + 10 + 784 self.alpha = alpha self.beta = beta self.visual_prompter_type = visual_prompter_type self.visual_prompter_apply = visual_prompter_apply self.num_frames = num_frames self.max_img_size = max_img_size self.visual_prompt_size = visual_prompt_size self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_grid_col_position_embeddings = max_grid_col_position_embeddings self.max_grid_row_position_embeddings = max_grid_row_position_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.pad_token_id = pad_token_id self.type_vocab_size = type_vocab_size self.is_training = False self.num_channels = 3 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.input_id_length]) pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size] ) config = self.get_config() return (config, input_ids, pixel_values, attention_mask) def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=64, hidden_sizes=[64, 128], depths=[2, 2], hidden_act="relu", out_features=["stage2"], out_indices=[2], ) return TvpConfig( backbone_config=resnet_config, alpha=self.alpha, beta=self.beta, visual_prompter_type=self.visual_prompter_type, visual_prompter_apply=self.visual_prompter_apply, num_frames=self.num_frames, max_img_size=self.max_img_size, visual_prompt_size=self.visual_prompt_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
max_grid_col_position_embeddings=self.max_grid_col_position_embeddings, max_grid_row_position_embeddings=self.max_grid_row_position_embeddings, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, type_vocab_size=self.type_vocab_size, ) def create_and_check_model(self, config, input_ids, pixel_values, attention_mask): model = TvpModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, pixel_values, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding} if is_torch_available() else {} ) test_torchscript = False def setUp(self): self.model_tester = TVPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="TVP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TVPModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertAlmostEqual( param.data.mean().item(), 0.0, delta=1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_torch class TvpModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp") if is_vision_available() else None def test_inference_no_head(self): model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 796, 128)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_with_head(self): model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) input_ids = 
torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 2)) assert outputs.logits.shape == expected_shape expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits, expected_slice, atol=1e-4))
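A condensed, hedged version of test_inference_with_head above, assuming the Jiqing/tiny-random-tvp checkpoint and the bundled COCO fixture image are available locally; it only illustrates the call pattern, not a canonical inference recipe.

import torch
from PIL import Image
from transformers import TvpForVideoGrounding, TvpImageProcessor

model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").eval()
image_processor = TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = image_processor(images=image, return_tensors="pt")
encoding.update({"input_ids": torch.tensor([[1, 2]]), "attention_mask": torch.tensor([[1, 1]])})

with torch.no_grad():
    logits = model(**encoding).logits
print(logits.shape)  # torch.Size([1, 2]); the test compares against [[0.5061, 0.4988]]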
import copy import os import pickle import tempfile import unittest from transformers import T5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace if is_torch_available(): import torch from transformers import ( AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5ForSequenceClassification, UMT5Model, ) class UMT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return T5Config.from_pretrained("google/umt5-base") def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.num_decoder_layers, config.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) input_ids = input_ids.clamp(self.pad_token_id + 2) input_ids[:, -1] = self.eos_token_id decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1) config = 
self.get_config() config.encoder_attention_heads = config.num_attention_heads input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids) return config, input_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_pipeline_config(self): return T5Config( vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) self.parent.assertEqual(len(decoder_past), config.num_layers) self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UMT5Model(config=config).get_decoder().to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_model_fp16_forward( self, config, input_dict, ): model = 
UMT5Model(config=config).to(torch_device).half().eval() output = model(**input_dict)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_sequence_classification_head( self, config, input_dict, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = UMT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model(**input_dict, labels=labels) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) @require_torch class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UMT5Model, UMT5ForConditionalGeneration, UMT5ForSequenceClassification, UMT5ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": UMT5ForConditionalGeneration, "feature-extraction": UMT5Model, "question-answering": UMT5ForQuestionAnswering, "summarization": UMT5ForConditionalGeneration, "text-classification": UMT5ForSequenceClassification, "text2text-generation": UMT5ForConditionalGeneration, "translation": UMT5ForConditionalGeneration, "zero-shot": UMT5ForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = True test_torchscript = True model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = UMT5ModelTester(self) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "UMT5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in 
set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) self.clear_torch_jit_class_registry() def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = UMT5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], ) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = 
config_and_inputs[0] model = UMT5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, ) attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch @require_sentencepiece @require_tokenizers class Umt5IntegrationTest(unittest.TestCase): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_integration_test(self): model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device) tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False) input_text = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids EXPECTED_IDS = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) torch.testing.assert_allclose(input_ids, EXPECTED_IDS) generated_ids = model.generate(input_ids.to(torch_device)) EXPECTED_FILLING = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] filling = tokenizer.batch_decode(generated_ids) self.assertEqual(filling, EXPECTED_FILLING)
codingutf8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch unispeech model import math import unittest import numpy as np import pytest from datasets import loaddataset from transformers import unispeechconfig istorchavailable from transformers testingutils import requiresoundfile requiretorch slow torchdevice from testconfigurationcommon import configtester from testmodelingcommon import modeltestermixin configzeroinit floatstensor idstensor randomattentionmask from testpipelinemixin import pipelinetestermixin if istorchavailable import torch from transformers import unispeechforctc unispeechforpretraining unispeechforsequenceclassification unispeechmodel wav2vec2featureextractor wav2vec2processor class unispeechmodeltester def init self parent batchsize13 seqlength1024 speech is longer istrainingfalse hiddensize16 featextractnormgroup featextractdropout0 0 featextractactivationgelu convdim32 32 32 convstride4 4 4 convkernel8 8 8 convbiasfalse numconvposembeddings16 numconvposembeddinggroups2 numhiddenlayers2 numattentionheads2 hiddendropoutprob0 1 this is most likely not correctly set yet intermediatesize20 layernormeps1e5 hiddenactgelu initializerrange0 02 vocabsize32 dostablelayernormfalse scopenone self parent parent self batchsize batchsize self seqlength seqlength self istraining istraining self hiddensize hiddensize self featextractnorm featextractnorm self featextractdropout featextractdropout self featextractactivation featextractactivation self convdim convdim self convstride convstride self convkernel convkernel self convbias convbias self numconvposembeddings numconvposembeddings self numconvposembeddinggroups numconvposembeddinggroups self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self hiddendropoutprob hiddendropoutprob self intermediatesize intermediatesize self layernormeps layernormeps self hiddenact hiddenact self initializerrange initializerrange self vocabsize vocabsize self dostablelayernorm dostablelayernorm self scope scope outputseqlength self seqlength for kernel stride in zipself convkernel self convstride outputseqlength outputseqlength kernel 1 stride self outputseqlength intmath ceiloutputseqlength self encoderseqlength self outputseqlength def prepareconfigandinputsself inputvalues floatstensorself batchsize self seqlength scale1 0 attentionmask randomattentionmaskself batchsize self seqlength config self getconfig return config inputvalues attentionmask def getconfigself return unispeechconfig hiddensizeself hiddensize featextractnormself featextractnorm featextractdropoutself featextractdropout featextractactivationself featextractactivation convdimself convdim convstrideself convstride convkernelself convkernel convbiasself convbias numconvposembeddingsself numconvposembeddings numconvposembeddinggroupsself numconvposembeddinggroups numhiddenlayersself numhiddenlayers numattentionheadsself numattentionheads hiddendropoutprobself hiddendropoutprob intermediatesizeself intermediatesize layernormepsself layernormeps 
hiddenactself hiddenact initializerrangeself initializerrange vocabsizeself vocabsize def createandcheckmodelself config inputvalues attentionmask model unispeechmodelconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self outputseqlength self hiddensize def createandcheckbatchinferenceself config inputvalues args test does not pass for models making use of groupnorm check https github compytorchfairseqissues3227 model unispeechmodelconfigconfig model totorchdevice model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch bool inputlengths inputvalues shape1 i for i in 4 2 1 pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 0 batchoutputs modelinputvalues attentionmaskattentionmask lasthiddenstate for i in rangeinputvalues shape0 inputslice inputvaluesi i 1 inputlengthsi output modelinputslice lasthiddenstate batchoutput batchoutputsi i 1 output shape1 self parent asserttruetorch allcloseoutput batchoutput atol1e3 def checkctclossself config inputvalues args model unispeechforctcconfigconfig model totorchdevice make sure that dropout is disabled model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch long inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 minmaxlengthlabels 1 model config vocabsize pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 model config ctclossreduction sum sumloss modelinputvalues attentionmaskattentionmask labelslabels loss item model config ctclossreduction mean meanloss modelinputvalues attentionmaskattentionmask labelslabels loss item self parent asserttrueisinstancesumloss float self parent asserttrueisinstancemeanloss float def checkseqclassifierlossself config inputvalues args model unispeechforsequenceclassificationconfigconfig model totorchdevice make sure that dropout is disabled model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch long inputlengths inputvalues shape1 i for i in 4 2 1 labels idstensorinputvalues shape0 1 lenmodel config id2label pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 maskedloss modelinputvalues attentionmaskattentionmask labelslabels loss item unmaskedloss modelinputvalues labelslabels loss item self parent asserttrueisinstancemaskedloss float self parent asserttrueisinstanceunmaskedloss float self parent asserttruemaskedloss unmaskedloss def checkctctrainingself config inputvalues args config ctczeroinfinity true model unispeechforctcconfigconfig model totorchdevice model train freeze feature encoder model freezefeatureencoder inputvalues inputvalues 3 inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 maxmaxlengthlabels 2 model config vocabsize pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 if maxlengthlabelsi labels shape1 it s important that we make sure that target lengths are at least one shorter than logit lengths to prevent inf labelsi maxlengthlabelsi 1 100 loss modelinputvalues labelslabels loss self parent assertfalsetorch isinfloss item loss backward def 
checkseqclassifiertrainingself config inputvalues args config ctczeroinfinity true model unispeechforsequenceclassificationconfigconfig model totorchdevice model train freeze everything but the classification head model freezebasemodel inputvalues inputvalues 3 inputlengths inputvalues shape1 i for i in 4 2 1 labels idstensorinputvalues shape0 1 lenmodel config id2label pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 loss modelinputvalues labelslabels loss self parent assertfalsetorch isinfloss item loss backward def checklabelsoutofvocabself config inputvalues args model unispeechforctcconfig model totorchdevice model train inputvalues inputvalues 3 inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 maxmaxlengthlabels 2 model config vocabsize 100 with pytest raisesvalueerror modelinputvalues labelslabels def prepareconfigandinputsforcommonself config inputvalues attentionmask self prepareconfigandinputs inputsdict inputvalues inputvalues attentionmask attentionmask return config inputsdict requiretorch class unispeechrobustmodeltestmodeltestermixin pipelinetestermixin unittest testcase allmodelclasses unispeechforctc unispeechmodel unispeechforsequenceclassification unispeechforpretraining if istorchavailable else pipelinemodelmapping audioclassification unispeechforsequenceclassification automaticspeechrecognition unispeechforctc featureextraction unispeechmodel if istorchavailable else testpruning false testheadmasking false def setupself self modeltester unispeechmodeltester self convstride3 3 3 featextractnormlayer dostablelayernormtrue self configtester configtesterself configclassunispeechconfig hiddensize37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs def testbatchedinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckbatchinferenceconfigandinputs def testctclossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkctclossconfigandinputs def testseqclassifierlossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifierlossconfigandinputs def testctctrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkctctrainingconfigandinputs def testseqclassifiertrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifiertrainingconfigandinputs def testlabelsoutofvocabself configandinputs self modeltester prepareconfigandinputs self modeltester checklabelsoutofvocabconfigandinputs unispeech has no inputsembeds def testinputsembedsself pass inputids is renamed to inputvalues def testforwardsignatureself pass unispeech cannot resize token embeddings since it has no tokens embeddings def testresizetokensembeddingsself pass unispeech has no inputsembeds and thus the getinputembeddings fn is not implemented def testmodelcommonattributesself pass def testretaingradhiddenstatesattentionsself config inputsdict self modeltester prepareconfigandinputsforcommon config outputhiddenstates true config outputattentions true no need to test all models as different heads yield the same functionality modelclass self allmodelclasses0 model modelclassconfig model totorchdevice set layer drop to 0 model config layerdrop 0 0 inputvalues 
inputsdictinputvalues inputlengths torch tensor inputvalues shape1 for in rangeinputvalues shape0 dtypetorch long devicetorchdevice outputlengths model getfeatextractoutputlengthsinputlengths labels idstensorinputvalues shape0 outputlengths0 2 self modeltester vocabsize inputsdictattentionmask torch oneslikeinputsdictattentionmask inputsdictlabels labels outputs modelinputsdict output outputs0 encoderdecoderonly models hiddenstates outputs hiddenstates0 attentions outputs attentions0 hiddenstates retaingrad attentions retaingrad output flatten0 backwardretaingraphtrue self assertisnotnonehiddenstates grad self assertisnotnoneattentions grad def testinitializationself config inputsdict self modeltester prepareconfigandinputsforcommon configsnoinit configzeroinitconfig for modelclass in self allmodelclasses model modelclassconfigconfigsnoinit for name param in model namedparameters uniforminitparms conv weight conv parametrizations weight maskedspecembed codevectors quantizer weightproj weight projecthid weight projecthid bias projectq weight projectq bias featureprojection projection weight featureprojection projection bias if param requiresgrad if anyx in name for x in uniforminitparms self asserttrue 1 0 param data mean 1e9 round 1e9 item 1 0 msgfparameter name of model modelclass seems not properly initialized else self assertin param data mean 1e9 round 1e9 item 0 0 1 0 msgfparameter name of model modelclass seems not properly initialized overwrite from testmodelingcommon def mockinitweightsself module if hasattrmodule weight and module weight is not none module weight data fill3 if hasattrmodule weightg and module weightg is not none module weightg data fill3 if hasattrmodule weightv and module weightv is not none module weightv data fill3 if hasattrmodule bias and module bias is not none module bias data fill3 if hasattrmodule codevectors and module codevectors is not none module codevectors data fill3 if hasattrmodule maskedspecembed and module maskedspecembed is not none module maskedspecembed data fill3 def testmaskfeatureprobctcself model unispeechforctc frompretrained hfinternaltestingtinyrandomunispeech maskfeatureprob0 2 maskfeaturelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomunispeech returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 def testmasktimeprobctcself model unispeechforctc frompretrained hfinternaltestingtinyrandomunispeech masktimeprob0 2 masktimelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomunispeech returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 def testmasktimefeatureprobctcsinglebatchself model unispeechforctc frompretrained hfinternaltestingtinyrandomunispeech masktimeprob0 2 maskfeatureprob0 2 masktimelength2 maskfeaturelength2 model totorchdevice train processor wav2vec2processor 
frompretrained hfinternaltestingtinyrandomunispeech returnattentionmasktrue batchdurationinseconds 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 1 1498 32 unittest skipreasonfeed forward chunking is not implemented def testfeedforwardchunkingself pass slow def testmodelfrompretrainedself model unispeechmodel frompretrainedmicrosoftunispeechlarge1500hcv self assertisnotnonemodel requiretorch requiresoundfile slow class unispeechmodelintegrationtestunittest testcase def loaddatasamplesself numsamples ds loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation automatic decoding with librispeech speechsamples ds sortid filter lambda x xid in f1272141231000i for i in rangenumsamples numsamplesaudio return xarray for x in speechsamples def loadsuperbself task numsamples ds loaddatasetantonlsuperbdummy task splittest return ds numsamples def testinferencepretrainingself model unispeechforpretraining frompretrainedmicrosoftunispeechlarge1500hcv model totorchdevice featureextractor wav2vec2featureextractor frompretrainedfacebookwav2vec2largexlsr53 inputspeech self loaddatasamples2 inputsdict featureextractorinputspeech returntensorspt paddingtrue with torch nograd torch manualseed0 outputs model inputsdict inputvalues totorchdevice attentionmaskinputsdict attentionmask totorchdevice compute cosine similarity cosinesim torch cosinesimilarityoutputs projectedstates outputs projectedquantizedstates dim1 pretrained model should have learned a high cosine similarity self asserttruecosinesim mean 0 5 fmt off expectedcosinesimslice torch tensor 0 8290 0 8335 0 8815 0 8580 0 8249 0 8892 0 9221 0 8711 0 8601 0 8482 devicetorchdevice fmt on self asserttruetorch allclosecosinesim 5 expectedcosinesimslice atol1e3 coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch unispeech model speech is longer this is most likely not correctly set yet test does not pass for models making use of group_norm check https github com pytorch fairseq issues 3227 pad input make sure that dropout is disabled pad input make sure that dropout is disabled pad input freeze feature encoder pad input it s important that we make sure that target lengths are at least one shorter than logit lengths to prevent inf freeze everything but the classification head pad input unispeech has no inputs_embeds input_ids is renamed to input_values unispeech cannot resize token embeddings since it has no tokens embeddings unispeech has no inputs_embeds and thus the get_input_embeddings fn is not implemented no need to test all models as different heads yield the same functionality set layer drop to 0 encoder decoder only models overwrite from test_modeling_common automatic decoding with librispeech compute cosine similarity pretrained model should have learned 
a high cosine similarity fmt off fmt on
import math import unittest import numpy as np import pytest from datasets import load_dataset from transformers import UniSpeechConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) class UniSpeechModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return UniSpeechConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, input_values, attention_mask): model = 
UniSpeechModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): model = UniSpeechModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = UniSpeechForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = UniSpeechForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss 
= model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = UniSpeechForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UniSpeechForCTC, UniSpeechModel, UniSpeechForSequenceClassification, UniSpeechForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": UniSpeechForSequenceClassification, "automatic-speech-recognition": UniSpeechForCTC, "feature-extraction": UniSpeechModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = UniSpeechModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=UniSpeechConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def 
test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechForCTC.from_pretrained( 
"hf-internal-testing/tiny-random-unispeech", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = UniSpeechForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechModel.from_pretrained("microsoft/unispeech-large-1500h-cv") self.assertIsNotNone(model) @require_torch @require_soundfile @slow class UniSpeechModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_pretraining(self): model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): torch.manual_seed(0) outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) self.assertTrue(cosine_sim.mean() > 0.5) expected_cosine_sim_slice = torch.tensor( [[0.8290, 0.8335, 0.8815, 0.8580, 0.8249], [0.8892, 0.9221, 0.8711, 0.8601, 0.8482]], device=torch_device, ) self.assertTrue(torch.allclose(cosine_sim[:, :5], expected_cosine_sim_slice, atol=1e-3))
codingutf8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch unispeechsat model import math import unittest import numpy as np import pytest from datasets import loaddataset from transformers import unispeechsatconfig istorchavailable from transformers testingutils import requiresoundfile requiretorch slow torchdevice from testconfigurationcommon import configtester from testmodelingcommon import modeltestermixin configzeroinit floatstensor idstensor randomattentionmask from testpipelinemixin import pipelinetestermixin if istorchavailable import torch from transformers import unispeechsatforaudioframeclassification unispeechsatforctc unispeechsatforpretraining unispeechsatforsequenceclassification unispeechsatforxvector unispeechsatmodel wav2vec2featureextractor wav2vec2processor class unispeechsatmodeltester def init self parent batchsize13 seqlength1024 speech is longer istrainingfalse hiddensize16 featextractnormgroup featextractdropout0 0 featextractactivationgelu convdim32 32 32 convstride4 4 4 convkernel8 8 8 convbiasfalse numconvposembeddings16 numconvposembeddinggroups2 numhiddenlayers2 numattentionheads2 hiddendropoutprob0 1 this is most likely not correctly set yet intermediatesize20 layernormeps1e5 hiddenactgelu initializerrange0 02 masktimeprob0 5 masktimelength2 vocabsize32 dostablelayernormfalse tdnndim32 32 tdnnkernel3 3 tdnndilation1 1 xvectoroutputdim32 scopenone self parent parent self batchsize batchsize self seqlength seqlength self istraining istraining self hiddensize hiddensize self featextractnorm featextractnorm self featextractdropout featextractdropout self featextractactivation featextractactivation self convdim convdim self convstride convstride self convkernel convkernel self convbias convbias self numconvposembeddings numconvposembeddings self numconvposembeddinggroups numconvposembeddinggroups self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self hiddendropoutprob hiddendropoutprob self intermediatesize intermediatesize self layernormeps layernormeps self hiddenact hiddenact self initializerrange initializerrange self vocabsize vocabsize self dostablelayernorm dostablelayernorm self masktimeprob masktimeprob self masktimelength masktimelength self tdnndim tdnndim self tdnnkernel tdnnkernel self tdnndilation tdnndilation self xvectoroutputdim xvectoroutputdim self scope scope outputseqlength self seqlength for kernel stride in zipself convkernel self convstride outputseqlength outputseqlength kernel 1 stride self outputseqlength intmath ceiloutputseqlength self encoderseqlength self outputseqlength def prepareconfigandinputsself inputvalues floatstensorself batchsize self seqlength scale1 0 attentionmask randomattentionmaskself batchsize self seqlength config self getconfig return config inputvalues attentionmask def getconfigself return unispeechsatconfig hiddensizeself hiddensize featextractnormself featextractnorm featextractdropoutself featextractdropout featextractactivationself featextractactivation convdimself convdim 
convstrideself convstride convkernelself convkernel convbiasself convbias numconvposembeddingsself numconvposembeddings numconvposembeddinggroupsself numconvposembeddinggroups masktimeprobself masktimeprob masktimelengthself masktimelength numhiddenlayersself numhiddenlayers numattentionheadsself numattentionheads hiddendropoutprobself hiddendropoutprob intermediatesizeself intermediatesize layernormepsself layernormeps hiddenactself hiddenact initializerrangeself initializerrange vocabsizeself vocabsize tdnndimself tdnndim tdnnkernelself tdnnkernel tdnndilationself tdnndilation xvectoroutputdimself xvectoroutputdim def createandcheckmodelself config inputvalues attentionmask model unispeechsatmodelconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self outputseqlength self hiddensize def createandcheckbatchinferenceself config inputvalues args test does not pass for models making use of groupnorm check https github compytorchfairseqissues3227 model unispeechsatmodelconfigconfig model totorchdevice model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch bool inputlengths inputvalues shape1 i for i in 4 2 1 pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 0 batchoutputs modelinputvalues attentionmaskattentionmask lasthiddenstate for i in rangeinputvalues shape0 inputslice inputvaluesi i 1 inputlengthsi output modelinputslice lasthiddenstate batchoutput batchoutputsi i 1 output shape1 self parent asserttruetorch allcloseoutput batchoutput atol1e3 def checkctclossself config inputvalues args model unispeechsatforctcconfigconfig model totorchdevice make sure that dropout is disabled model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch long inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 minmaxlengthlabels 1 model config vocabsize pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 model config ctclossreduction sum sumloss modelinputvalues attentionmaskattentionmask labelslabels loss item model config ctclossreduction mean meanloss modelinputvalues attentionmaskattentionmask labelslabels loss item self parent asserttrueisinstancesumloss float self parent asserttrueisinstancemeanloss float def checkseqclassifierlossself config inputvalues args model unispeechsatforsequenceclassificationconfigconfig model totorchdevice make sure that dropout is disabled model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch long inputlengths inputvalues shape1 i for i in 4 2 1 labels idstensorinputvalues shape0 1 lenmodel config id2label pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 attentionmaski inputlengthsi 0 maskedloss modelinputvalues attentionmaskattentionmask labelslabels loss item unmaskedloss modelinputvalues labelslabels loss item self parent asserttrueisinstancemaskedloss float self parent asserttrueisinstanceunmaskedloss float self parent asserttruemaskedloss unmaskedloss def checkctctrainingself config inputvalues args config ctczeroinfinity true model unispeechsatforctcconfigconfig model totorchdevice model train freeze feature encoder model freezefeatureencoder inputvalues inputvalues 3 
inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 maxmaxlengthlabels 2 model config vocabsize pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 if maxlengthlabelsi labels shape1 it s important that we make sure that target lengths are at least one shorter than logit lengths to prevent inf labelsi maxlengthlabelsi 1 100 loss modelinputvalues labelslabels loss self parent assertfalsetorch isinfloss item loss backward def checkseqclassifiertrainingself config inputvalues args config ctczeroinfinity true model unispeechsatforsequenceclassificationconfigconfig model totorchdevice model train freeze everything but the classification head model freezebasemodel inputvalues inputvalues 3 inputlengths inputvalues shape1 i for i in 4 2 1 labels idstensorinputvalues shape0 1 lenmodel config id2label pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 loss modelinputvalues labelslabels loss self parent assertfalsetorch isinfloss item loss backward def checkxvectortrainingself config args config ctczeroinfinity true model unispeechsatforxvectorconfigconfig model totorchdevice model train freeze everything but the classification head model freezebasemodel use a longer sequence length to account for tdnn temporal downsampling inputvalues floatstensorself batchsize self seqlength 2 scale1 0 inputlengths inputvalues shape1 i for i in 4 2 1 labels idstensorinputvalues shape0 1 lenmodel config id2label pad input for i in rangeleninputlengths inputvaluesi inputlengthsi 0 0 loss modelinputvalues labelslabels loss self parent assertfalsetorch isinfloss item loss backward def checklabelsoutofvocabself config inputvalues args model unispeechsatforctcconfig model totorchdevice model train inputvalues inputvalues 3 inputlengths inputvalues shape1 i for i in 4 2 1 maxlengthlabels model getfeatextractoutputlengthstorch tensorinputlengths labels idstensorinputvalues shape0 maxmaxlengthlabels 2 model config vocabsize 100 with pytest raisesvalueerror modelinputvalues labelslabels def prepareconfigandinputsforcommonself config inputvalues attentionmask self prepareconfigandinputs inputsdict inputvalues inputvalues attentionmask attentionmask return config inputsdict requiretorch class unispeechsatmodeltestmodeltestermixin pipelinetestermixin unittest testcase allmodelclasses unispeechsatforctc unispeechsatforpretraining unispeechsatmodel unispeechsatforsequenceclassification unispeechsatforaudioframeclassification unispeechsatforxvector if istorchavailable else pipelinemodelmapping audioclassification unispeechsatforsequenceclassification automaticspeechrecognition unispeechsatforctc featureextraction unispeechsatmodel if istorchavailable else testpruning false testheadmasking false testtorchscript false def setupself self modeltester unispeechsatmodeltesterself self configtester configtesterself configclassunispeechsatconfig hiddensize37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs def testctclossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkctclossconfigandinputs def testseqclassifierlossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifierlossconfigandinputs def testctctrainself configandinputs self modeltester prepareconfigandinputs self 
Testing suite for the PyTorch UniSpeechSat model (Apache License 2.0, Copyright 2021 The HuggingFace Inc. team). It covers the common model tests (CTC and sequence-classification loss and training, x-vector training, out-of-vocabulary labels), a robust variant configured with layer feature-extractor norm and stable layer norm, and slow integration tests for encoder inference, speaker diarization, and speaker verification. The batched-inference test does not pass for models making use of group_norm (see https://github.com/pytorch/fairseq/issues/3227).
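As a minimal usage sketch of the API these tests exercise (the checkpoint name comes from the masking tests below; the random audio is a placeholder standing in for real 16 kHz speech):

import numpy as np
import torch

from transformers import UniSpeechSatForCTC, Wav2Vec2Processor

# Tiny random checkpoint, the same one used by the mask_time_prob / mask_feature_prob tests below.
processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-unispeech-sat")
model = UniSpeechSatForCTC.from_pretrained("hf-internal-testing/tiny-random-unispeech-sat")
model.eval()

# One second of random 16 kHz "audio"; the real tests use LibriSpeech samples instead.
audio = np.random.random(16_000).astype(np.float32)
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values).logits  # (batch, frames, vocab_size)

# Greedy CTC decoding; with a random checkpoint the transcription is meaningless.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))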
import math import unittest import numpy as np import pytest from datasets import load_dataset from transformers import UniSpeechSatConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, UniSpeechSatModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) class UniSpeechSatModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, tdnn_dim=(32, 32), tdnn_kernel=(3, 3), tdnn_dilation=(1, 1), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return UniSpeechSatConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, 
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = UniSpeechSatForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) 
self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, *args): config.ctc_zero_infinity = True model = UniSpeechSatForXVector(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = floats_tensor([self.batch_size, self.seq_length * 2], scale=1.0) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = UniSpeechSatForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification, UniSpeechSatForAudioFrameClassification, UniSpeechSatForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": UniSpeechSatForSequenceClassification, "automatic-speech-recognition": UniSpeechSatForCTC, "feature-extraction": UniSpeechSatModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester(self) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): 
self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: 
module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") self.assertIsNotNone(model) @require_torch class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( (UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( 
"hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") self.assertIsNotNone(model) @require_torch @require_soundfile @slow class UniSpeechSatModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_encoder_base(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), 
attention_mask=inputs_dict.attention_mask.to(torch_device), ) expected_hidden_states_slice = torch.tensor( [[[-0.0743, 0.1384], [-0.0845, 0.1704]], [[-0.0954, 0.1936], [-0.1123, 0.2095]]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3)) def test_inference_encoder_large(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) expected_hidden_states_slice = torch.tensor( [[[-0.1172, -0.0797], [-0.0012, 0.0213]], [[-0.1225, -0.1277], [-0.0668, -0.0585]]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3)) def test_inference_diarization(self): model = UniSpeechSatForAudioFrameClassification.from_pretrained("microsoft/unispeech-sat-base-plus-sd").to( torch_device ) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) labels = (outputs.logits > 0).long() expected_logits = torch.tensor( [ [[-5.6119, -5.5845], [-3.7772, -5.4824], [-3.6914, -5.1619], [-4.7560, -5.0496]], [[-6.3785, -4.8365], [-5.5863, -5.4149], [-5.5639, -4.8469], [-6.1511, -4.0052]], [[-6.0355, -3.7414], [-5.5968, -4.8061], [-5.4620, -4.7310], [-5.5864, -4.6078]], [[-5.9493, -4.8963], [-4.4050, -5.4476], [-4.1755, -5.1395], [-4.0272, -4.3705]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 270) self.assertEqual(labels[0, :, 1].sum(), 647) self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1) cosine_sim = torch.nn.CosineSimilarity(dim=-1) self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9671, 3) self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.4941, 3) self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3) self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
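Building on the speaker-verification integration test above, a minimal sketch of how the x-vector head is typically used to compare two utterances. The checkpoint name matches the test; the 0.85 decision threshold and the random waveforms are illustrative assumptions, not values from the test or the model card.

import torch

from transformers import UniSpeechSatForXVector, Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
model.eval()


def embed(waveform, sampling_rate=16_000):
    # waveform: 1D float array of raw audio samples at 16 kHz.
    inputs = feature_extractor(waveform, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
    with torch.no_grad():
        embeddings = model(**inputs).embeddings
    return torch.nn.functional.normalize(embeddings, dim=-1)


# Replace with real recordings; random noise only keeps the sketch self-contained.
utterance_a = torch.randn(16_000).numpy()
utterance_b = torch.randn(16_000).numpy()

similarity = torch.nn.CosineSimilarity(dim=-1)(embed(utterance_a), embed(utterance_b)).item()
THRESHOLD = 0.85  # illustrative value; tune on held-out verification pairs
print("same speaker" if similarity >= THRESHOLD else "different speakers")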
Testing suite for the UnivNet feature extractor (Apache License 2.0, Copyright 2023 The HuggingFace Team). Several helpers and tests are copied from tests/models/whisper/test_feature_extraction_whisper.py, including floats_list (which creates a random float32 tensor), the save/load round-trip tests, and the double-precision padding test. The remaining tests cover batched/unbatched consistency, noise generation, end padding, batch decoding, and a slow integration check of the extracted log-mel features against expected values on the dummy LibriSpeech split.
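A minimal sketch of the call pattern these tests exercise, using the default UnivNetFeatureExtractor settings (24 kHz input, 100 mel bins); the waveform lengths and values are placeholders standing in for real 24 kHz speech. Raw audio is turned into padded log-mel spectrograms plus a matching noise sequence for the UnivNet vocoder.

import numpy as np

from transformers import UnivNetFeatureExtractor

feature_extractor = UnivNetFeatureExtractor()  # defaults: 24 kHz audio, 100 mel bins, hop length 256

# Two random waveforms of different lengths, standing in for real 24 kHz speech.
audio = [np.random.uniform(-1.0, 1.0, size=24_000 * s).astype(np.float32) for s in (1, 2)]

features = feature_extractor(
    audio,
    sampling_rate=24_000,
    padding=True,       # pad to the longest example in the batch
    return_noise=True,  # also return the noise sequence the vocoder conditions on
    pad_end=True,       # append pad_end_length extra frames to reduce edge artifacts
    return_tensors="np",
)

print(features.input_features.shape)  # (batch, frames, num_mel_bins)
print(features.noise_sequence.shape)  # (batch, frames, model_in_channels)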
import itertools import os import random import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from transformers import UnivNetFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class UnivNetFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, sampling_rate=24000, padding_value=0.0, do_normalize=True, num_mel_bins=100, hop_length=256, win_length=1024, win_function="hann_window", filter_length=1024, max_length_s=10, fmin=0.0, fmax=12000, mel_floor=1e-9, center=False, compression_factor=1.0, compression_clip_val=1e-5, normalize_min=-11.512925148010254, normalize_max=2.3143386840820312, model_in_channels=64, pad_end_length=10, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.sampling_rate = sampling_rate self.padding_value = padding_value self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.filter_length = filter_length self.max_length_s = max_length_s self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.center = center self.compression_factor = compression_factor self.compression_clip_val = compression_clip_val self.normalize_min = normalize_min self.normalize_max = normalize_max self.model_in_channels = model_in_channels self.pad_end_length = pad_end_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "sampling_rate": self.sampling_rate, "padding_value": self.padding_value, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "filter_length": self.filter_length, "max_length_s": self.max_length_s, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "center": self.center, "compression_factor": self.compression_factor, "compression_clip_val": self.compression_clip_val, "normalize_min": self.normalize_min, "normalize_max": self.normalize_max, "model_in_channels": self.model_in_channels, "pad_end_length": self.pad_end_length, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class UnivNetFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = UnivNetFeatureExtractor def setUp(self): self.feat_extract_tester = 
UnivNetFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] input_features = feature_extractor( np_speech_inputs, padding="max_length", max_length=1600, return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.num_mel_bins) encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [ floats_list((1, x))[0] for x in range((feature_extractor.num_max_samples - 100), (feature_extractor.num_max_samples + 500), 200) ] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.num_max_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): 
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batched_unbatched_consistency(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = floats_list((1, 800))[0] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([np_speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor( np.expand_dims(np_speech_inputs, axis=0), return_tensors="np" ).input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_generate_noise(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, return_noise=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) def test_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] input_features1 = feature_extractor(speech_inputs, padding=False, pad_end=False).input_features input_features2 = feature_extractor(speech_inputs, padding=False, pad_end=True).input_features for spectrogram1, spectrogram2 in zip(input_features1, input_features2): self.assertEqual(spectrogram1.shape[0] + self.feat_extract_tester.pad_end_length, spectrogram2.shape[0]) def test_generate_noise_and_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, padding=False, return_noise=True, pad_end=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) @require_torch def test_batch_decode(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) input_lengths = list(range(800, 1400, 200)) pad_samples = feature_extractor.pad_end_length * feature_extractor.hop_length output_features = { "waveforms": torch.tensor(floats_list((3, max(input_lengths) + pad_samples))), "waveform_lengths": torch.tensor(input_lengths), } waveforms = feature_extractor.batch_decode(**output_features) for input_length, waveform in zip(input_lengths, waveforms): self.assertTrue(len(waveform.shape) == 1, msg="Individual output waveforms should be 1D") self.assertEqual(waveform.shape[0], input_length) @require_torch def test_double_precision_pad(self): import torch feature_extractor = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=self.feat_extract_tester.sampling_rate)) speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] @slow @require_torch def test_integration(self): EXPECTED_INPUT_FEATURES = torch.tensor( [ -5.0229, -6.1358, -5.8346, -5.4447, -5.6707, -5.8577, -5.0464, -5.0058, -5.6015, -5.6410, -5.4325, -5.6116, -5.3700, -5.7956, -5.3196, -5.3274, -5.9655, -5.6057, -5.8382, -5.9602, -5.9005, -5.9123, -5.7669, -6.1441, -5.5168, -5.1405, -5.3927, -6.0032, -5.5784, -5.3728 ], ) input_speech, sr = self._load_datasamples(1) feature_extractor = UnivNetFeatureExtractor() input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 548, 100)) input_features_mean = torch.mean(input_features) input_features_stddev = torch.std(input_features) EXPECTED_MEAN = torch.tensor(-6.18862009) EXPECTED_STDDEV = torch.tensor(2.80845642) torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, atol=5e-5, rtol=5e-6) torch.testing.assert_close(input_features_stddev, EXPECTED_STDDEV) torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-5)
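A quick illustration of the call pattern the feature extraction tests above exercise, in case it is useful next to them: raw audio in, log-mel input_features plus a matching noise_sequence out. This is a minimal sketch with default constructor arguments and a synthetic sine wave standing in for real speech.

import numpy as np

from transformers import UnivNetFeatureExtractor

feature_extractor = UnivNetFeatureExtractor()

# One second of placeholder audio at the extractor's 24 kHz sampling rate.
audio = np.sin(2 * np.pi * 440.0 * np.arange(24000) / 24000).astype(np.float32)

# return_noise=True also yields a noise_sequence with one noise vector per spectrogram frame;
# pad_end=True appends pad_end_length extra frames, as checked in test_pad_end above.
features = feature_extractor(
    audio,
    sampling_rate=feature_extractor.sampling_rate,
    return_noise=True,
    pad_end=True,
    return_tensors="np",
)

print(features.input_features.shape)  # (batch, num_frames, num_mel_bins)
print(features.noise_sequence.shape)  # (batch, num_frames, model_in_channels)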
Copyright 2023 The HuggingFace Team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Notes carried over from the source comments of the UnivNet model tests: noise is created on CPU for reproducibility and only then moved to the desired device; UnivNetModel currently cannot be traced with torch.jit.trace; UnivNetModel is not a transformer and does not use any attention mechanisms, so transformer/attention-related tests are skipped; it is not a sequence classification model, has no base_model_prefix attribute, and does not implement a parallelize method; signature.parameters is an OrderedDict, so the arg_names order is deterministic; audio samples are decoded automatically from LibriSpeech; model_in_channels is hard-coded to 64 and num_mel_channels to 100; noise_sequence is explicitly generated on CPU for consistency while the spectrogram is expected to already be on torch_device, permuted to match the diffusers implementation; batched and unbatched noise and spectrogram inputs are built from a sample checkpoint from Tortoise TTS.
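One detail the notes above call out, and that the tester below mirrors, is seeding and sampling the noise on CPU and only moving it to the target device afterwards, so the values do not depend on which accelerator is available. A standalone sketch of that pattern (the device choice is illustrative):

import torch

seed = 0
seq_length, in_channels = 7, 8  # toy sizes matching the tester defaults below

# Seed and sample on CPU so the noise is bit-for-bit reproducible across machines.
generator = torch.manual_seed(seed)
noise_sequence = torch.randn((seq_length, in_channels), generator=generator, dtype=torch.float)

# Move to the accelerator only after sampling.
device = "cuda" if torch.cuda.is_available() else "cpu"
noise_sequence = noise_sequence.to(device)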
import gc import inspect import random import unittest from datasets import Audio, load_dataset from transformers import UnivNetConfig, UnivNetFeatureExtractor from transformers.testing_utils import ( is_torch_available, require_torch, require_torch_gpu, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ) if is_torch_available(): import torch from transformers import UnivNetModel class UnivNetModelTester: def __init__( self, parent, batch_size=2, seq_length=7, in_channels=8, hidden_channels=8, num_mel_bins=20, kernel_predictor_hidden_channels=8, seed=0, is_training=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.in_channels = in_channels self.hidden_channels = hidden_channels self.num_mel_bins = num_mel_bins self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels self.seed = seed self.is_training = is_training def prepare_noise_sequence(self): generator = torch.manual_seed(self.seed) noise_shape = (self.seq_length, self.in_channels) noise_sequence = torch.randn(noise_shape, generator=generator, dtype=torch.float) return noise_sequence def prepare_config_and_inputs(self): spectrogram = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0) noise_sequence = self.prepare_noise_sequence() noise_sequence = noise_sequence.to(spectrogram.device) config = self.get_config() return config, spectrogram, noise_sequence def get_config(self): return UnivNetConfig( model_in_channels=self.in_channels, model_hidden_channels=self.hidden_channels, num_mel_bins=self.num_mel_bins, kernel_predictor_hidden_channels=self.kernel_predictor_hidden_channels, ) def create_and_check_model(self, config, spectrogram, noise_sequence): model = UnivNetModel(config=config).to(torch_device).eval() result = model(spectrogram, noise_sequence)[0] self.parent.assertEqual(result.shape, (1, self.seq_length * 256)) def prepare_config_and_inputs_for_common(self): config, spectrogram, noise_sequence = self.prepare_config_and_inputs() inputs_dict = {"input_features": spectrogram, "noise_sequence": noise_sequence} return config, inputs_dict @require_torch class UnivNetModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (UnivNetModel,) if is_torch_available() else () test_torchscript = False test_pruning = False test_resize_embeddings = False test_resize_position_embeddings = False test_head_masking = False test_mismatched_shapes = False test_missing_keys = False test_model_parallel = False is_encoder_decoder = False has_attentions = False input_name = "input_features" def setUp(self): self.model_tester = UnivNetModelTester(self) self.config_tester = ConfigTester(self, config_class=UnivNetConfig) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = 
model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="UnivNetModel does not output hidden_states.") def test_hidden_states_output(self): pass @unittest.skip(reason="UnivNetModel.forward does not accept an inputs_embeds argument.") def test_inputs_embeds(self): pass @unittest.skip(reason="UnivNetModel does not use input embeddings and thus has no get_input_embeddings method.") def test_model_common_attributes(self): pass @unittest.skip(reason="UnivNetModel does not support all arguments tested, such as output_hidden_states.") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="UnivNetModel does not output hidden_states.") def test_retain_grad_hidden_states_attentions(self): pass def test_batched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() batched_spectrogram = inputs["input_features"].unsqueeze(0).repeat(2, 1, 1) batched_noise_sequence = inputs["noise_sequence"].unsqueeze(0).repeat(2, 1, 1) with torch.no_grad(): batched_outputs = model( batched_spectrogram.to(torch_device), batched_noise_sequence.to(torch_device), )[0] self.assertEqual( batched_spectrogram.shape[0], batched_outputs.shape[0], msg="Got different batch dims for input and output", ) def test_unbatched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(inputs["input_features"].to(torch_device), inputs["noise_sequence"].to(torch_device))[ 0 ] self.assertTrue(outputs.shape[0] == 1, msg="Unbatched input should create batched output with bsz = 1") def test_unbatched_batched_outputs_consistency(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() unbatched_spectrogram = inputs["input_features"].detach().clone() unbatched_noise_sequence = inputs["noise_sequence"].detach().clone() batched_spectrogram = inputs["input_features"].unsqueeze(0) batched_noise_sequence = inputs["noise_sequence"].unsqueeze(0) with torch.no_grad(): unbatched_outputs = model( unbatched_spectrogram.to(torch_device), unbatched_noise_sequence.to(torch_device), )[0] batched_outputs = model( batched_spectrogram.to(torch_device), batched_noise_sequence.to(torch_device), )[0] torch.testing.assert_close(unbatched_outputs, batched_outputs) @require_torch_gpu @slow class UnivNetModelIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def _load_datasamples(self, num_samples, sampling_rate=24000): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=sampling_rate)) speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] def get_inputs(self, device, num_samples: int = 3, noise_length: int = 10, seed: int = 0): generator = torch.manual_seed(seed) if num_samples == 1: noise_sequence_shape = (64, noise_length) else: 
noise_sequence_shape = (num_samples, 64, noise_length) noise_sequence = torch.randn(noise_sequence_shape, generator=generator, dtype=torch.float32, device="cpu") noise_sequence = noise_sequence.to(device) if num_samples == 1: spectrogram_shape = [100, noise_length] else: spectrogram_shape = [num_samples, 100, noise_length] spectrogram = floats_tensor(spectrogram_shape, scale=1.0, rng=random.Random(seed)) if num_samples == 1: noise_sequence = noise_sequence.transpose(1, 0) spectrogram = spectrogram.transpose(1, 0) else: noise_sequence = noise_sequence.transpose(2, 1) spectrogram = spectrogram.transpose(2, 1) inputs = { "input_features": spectrogram, "noise_sequence": noise_sequence, "generator": generator, } return inputs def test_model_inference_batched(self): model = UnivNetModel.from_pretrained("dg845/univnet-dev") model.eval().to(torch_device) input_speech = self.get_inputs(torch_device, num_samples=3) with torch.no_grad(): waveform = model(**input_speech)[0] waveform = waveform.cpu() waveform_mean = torch.mean(waveform) waveform_stddev = torch.std(waveform) waveform_slice = waveform[-1, -9:].flatten() EXPECTED_MEAN = torch.tensor(-0.19989729) EXPECTED_STDDEV = torch.tensor(0.35230172) EXPECTED_SLICE = torch.tensor([-0.3408, -0.6045, -0.5052, 0.1160, -0.1556, -0.0405, -0.3024, -0.5290, -0.5019]) torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5) torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-4, rtol=1e-5) def test_model_inference_unbatched(self): model = UnivNetModel.from_pretrained("dg845/univnet-dev") model.eval().to(torch_device) input_speech = self.get_inputs(torch_device, num_samples=1) with torch.no_grad(): waveform = model(**input_speech)[0] waveform = waveform.cpu() waveform_mean = torch.mean(waveform) waveform_stddev = torch.std(waveform) waveform_slice = waveform[-1, -9:].flatten() EXPECTED_MEAN = torch.tensor(-0.22895093) EXPECTED_STDDEV = torch.tensor(0.33986747) EXPECTED_SLICE = torch.tensor([-0.3276, -0.5504, -0.3484, 0.3574, -0.0373, -0.1826, -0.4880, -0.6431, -0.5162]) torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5) torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=1e-3, rtol=1e-5) def test_integration(self): feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") model = UnivNetModel.from_pretrained("dg845/univnet-dev") model.eval().to(torch_device) audio, sr = self._load_datasamples(1, sampling_rate=feature_extractor.sampling_rate) input_features = feature_extractor(audio, sampling_rate=sr[0], return_tensors="pt").input_features input_features = input_features.to(device=torch_device) input_speech = self.get_inputs(torch_device, num_samples=1, noise_length=input_features.shape[1]) input_speech["input_features"] = input_features with torch.no_grad(): waveform = model(**input_speech)[0] waveform = waveform.cpu() waveform_mean = torch.mean(waveform) waveform_stddev = torch.std(waveform) waveform_slice = waveform[-1, -9:].flatten() EXPECTED_MEAN = torch.tensor(0.00051374) EXPECTED_STDDEV = torch.tensor(0.058105603) EXPECTED_SLICE = torch.tensor([-4.3934e-04, -1.8203e-04, -3.3033e-04, -3.8716e-04, -1.6125e-04, 3.5389e-06, -3.3149e-04, -3.7613e-04, -2.3331e-04]) torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=5e-6, rtol=1e-5) torch.testing.assert_close(waveform_stddev, 
EXPECTED_STDDEV, atol=1e-4, rtol=1e-5) torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-6, rtol=1e-5)
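Putting the feature extractor and the vocoder together, a minimal end-to-end sketch with the dg845/univnet-dev checkpoint used in the integration tests above; the random input audio and CPU execution are assumptions, and in a real pipeline the spectrogram would come from an acoustic model rather than from a waveform.

import numpy as np
import torch

from transformers import UnivNetFeatureExtractor, UnivNetModel

checkpoint = "dg845/univnet-dev"
feature_extractor = UnivNetFeatureExtractor.from_pretrained(checkpoint)
model = UnivNetModel.from_pretrained(checkpoint).eval()

# Placeholder audio; any 24 kHz waveform would do here.
audio = np.random.randn(24000).astype(np.float32)
inputs = feature_extractor(
    audio,
    sampling_rate=feature_extractor.sampling_rate,
    return_noise=True,
    return_tensors="pt",
)

with torch.no_grad():
    waveforms = model(inputs.input_features, inputs.noise_sequence).waveforms

print(waveforms.shape)  # (batch, num_samples)

When per-example lengths are known, feature_extractor.batch_decode can strip padded tails from the generated waveforms, as test_batch_decode above demonstrates.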
Copyright 2022 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch UperNet framework. Notes carried over from the source comments: some tests from test_modeling_common.py are overridden here because UperNet does not use input_ids, inputs_embeds, attention_mask, or seq_length; ConvNext's feature maps have shape (batch_size, num_channels, height, width); output_hidden_states is also exercised through the config; results are verified on an image from ADE20k.
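The tester in the module below composes a tiny ConvNext backbone config into an UperNetConfig; reduced to its essentials, that composition looks roughly like this. All sizes are illustrative test-scale values, not those of any released checkpoint.

from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation

# Small backbone; out_features selects which stages feed the UperNet decode head.
backbone_config = ConvNextConfig(
    num_channels=3,
    num_stages=4,
    hidden_sizes=[10, 20, 30, 40],
    depths=[1, 1, 1, 1],
    out_features=["stage2", "stage3", "stage4"],
)

config = UperNetConfig(
    backbone_config=backbone_config,
    hidden_size=64,
    pool_scales=[1, 2, 3, 6],
    use_auxiliary_head=True,
    auxiliary_in_channels=40,  # width of the backbone stage the auxiliary head reads, as in the tester below
    auxiliary_channels=32,
    auxiliary_num_convs=1,
    num_labels=3,
)

model = UperNetForSemanticSegmentation(config)
print(sum(p.numel() for p in model.parameters()))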
import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UperNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 1, 1], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.out_features = out_features self.num_labels = num_labels self.scope = scope self.num_hidden_layers = num_stages def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=64, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=32, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, ) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): model = UperNetForSemanticSegmentation(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else () pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = 
False test_head_masking = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = UperNetModelTester(self) self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @unittest.skip(reason="UperNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="UperNet does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_to_base(self): pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def test_multi_gpu_data_parallel_forward(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="UperNet does not have tied weights") def test_tied_model_weights_key_ignore(self): pass @slow def test_model_from_pretrained(self): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = UperNetForSemanticSegmentation.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): filepath = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg" ) image = 
Image.open(filepath).convert("RGB") return image @require_torch @require_vision @slow class UperNetModelIntegrationTest(unittest.TestCase): def test_inference_swin_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) def test_inference_convnext_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
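For reference next to the integration tests above, a minimal inference sketch with the same openmmlab/upernet-convnext-tiny checkpoint; the image path is a placeholder, and the argmax is just the simplest way to turn the logits into a per-pixel label map.

import torch
from PIL import Image

from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

checkpoint = "openmmlab/upernet-convnext-tiny"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = UperNetForSemanticSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("scene.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels, 512, 512) for this checkpoint, as the tests above assert

# Per-pixel class prediction over the checkpoint's label set.
segmentation_map = logits.argmax(dim=1)[0]
print(segmentation_map.shape)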
Copyright 2022 HuggingFace Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Notes carried over from the source comments of the image processing tests: each call test initializes the image_processing class, creates random PIL videos, NumPy tensors, or PyTorch tensors, and then checks both non-batched and batched inputs.
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VideoMAEImageProcessor class VideoMAEImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } def expected_output_image_shape(self, images): return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VideoMAEImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = VideoMAEImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) 
self.assertIsInstance(video[0], Image.Image) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy_4_channels(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) )
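A quick look at what the call tests above verify, with one synthetic video (a list of NumPy frames) run through the processor; the frame count, resolution, and the small size/crop_size values mirror the tester rather than any released checkpoint.

import numpy as np

from transformers import VideoMAEImageProcessor

image_processor = VideoMAEImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})

# One video is a list of frames; here, ten random RGB frames in height-width-channel layout.
video = [np.random.randint(0, 256, size=(30, 40, 3), dtype=np.uint8) for _ in range(10)]

encoding = image_processor(video, return_tensors="pt")
# Expected layout: (batch, num_frames, num_channels, crop height, crop width), i.e. (1, 10, 3, 18, 18).
print(encoding.pixel_values.shape)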
Copyright 2022 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0); distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch VideoMAE model. Notes carried over from the source comments: in VideoMAE the number of tokens equals (num_frames // tubelet_size) * num_patches per frame, and this quantity is used to define bool_masked_pos; importantly, each video needs to have the same number of masked patches, hence a single mask is defined and then repeated for each example in the batch; the model only returns predictions for the masked patches; some tests from test_modeling_common.py are overridden here because VideoMAE does not use input_ids, inputs_embeds, attention_mask, or seq_length; output_attentions and output_hidden_states are also exercised through the config, and attentions are checked to always come last and in the right order; results are verified on a video of someone eating spaghetti (frame indices 164, 168, 172, 176, 181, 185, 189, 193, 198, 202, 206, 210, 215, 219, 223, 227); the logits were tested with a different image mean and std, so the same values are used here; the video classification integration test uses the MCG-NJU/videomae-base-finetuned-kinetics checkpoint and the pretraining integration test uses MCG-NJU/videomae-base-short; the pretraining test adds a boolean mask indicating which patches to mask, runs a forward pass, verifies the logits, and verifies the loss with norm_pix_loss set to True and then to False.
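One point from the notes above is worth isolating: for pretraining, every video in a batch must mask the same number of patches, so a single boolean mask is built and repeated across the batch. A standalone sketch of that construction with a deliberately small, illustrative config (not a released checkpoint):

import torch

from transformers import VideoMAEConfig, VideoMAEForPreTraining

config = VideoMAEConfig(
    image_size=32,
    patch_size=8,
    num_frames=4,
    tubelet_size=2,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    decoder_hidden_size=32,
    decoder_intermediate_size=64,
    decoder_num_attention_heads=4,
    decoder_num_hidden_layers=2,
)
model = VideoMAEForPreTraining(config).eval()

batch_size, mask_ratio = 2, 0.9
num_patches_per_frame = (config.image_size // config.patch_size) ** 2
seq_length = (config.num_frames // config.tubelet_size) * num_patches_per_frame
num_masks = int(mask_ratio * seq_length)

# One shared mask, repeated for every example, so each video masks the same patch count.
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()

pixel_values = torch.randn(batch_size, config.num_frames, 3, config.image_size, config.image_size)
with torch.no_grad():
    logits = model(pixel_values, bool_masked_pos=bool_masked_pos).logits

# Predictions are returned only for the masked patches:
# (batch_size, num_masks, 3 * tubelet_size * patch_size**2).
print(logits.shape)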
import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class VideoMAEModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.tubelet_size = tubelet_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VideoMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, ) def create_and_check_model(self, config, pixel_values, labels): model = VideoMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 
self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = VideoMAEForPreTraining(config) model.to(torch_device) model.eval() mask = torch.ones((self.num_masks,)) mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.batch_size, -1).bool() result = model(pixel_values, bool_masked_pos) num_masked_patches = mask.sum().item() decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VideoMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VideoMAEForPreTraining: mask = torch.ones((self.model_tester.num_masks,)) mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool() inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device) if return_labels: if model_class in [ *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VideoMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_len = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VideoMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_for_pretraining(self): model = 
VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (config.norm_pix_loss = True)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (config.norm_pix_loss = False)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
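As a back-of-the-envelope check on the shapes asserted in the pretraining integration test, the following sketch assumes the checkpoint's usual geometry (224x224 inputs, 16x16 patches, 16 frames, tubelet size 2); the count of 1408 masked tokens is taken from the bool_masked_pos file downloaded above rather than derived from a mask ratio.

num_frames, tubelet_size = 16, 2
image_size, patch_size = 224, 16

tokens = (num_frames // tubelet_size) * (image_size // patch_size) ** 2  # 8 * 196 = 1568
decoder_labels = 3 * tubelet_size * patch_size**2  # 3 * 2 * 256 = 1536
masked_tokens = 1408  # number of True entries in the downloaded bool_masked_pos
assert (1, masked_tokens, decoder_labels) == (1, 1408, 1536)  # matches expected_shape above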
Copyright 2021 HuggingFace Inc., Apache License 2.0. Tests for the ViLT image processor. The tester below includes a helper that computes the expected height and width when providing images to ViltImageProcessor, assuming do_resize is set to True with a scalar size and a size_divisor.
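A standalone sketch of that sizing rule may help: the shortest edge is scaled to the target size, the longest edge is capped at int(1333 / 800 * size), and both sides are floored to a multiple of size_divisor. The helper name and example values are made up for illustration; the tester below re-implements the same logic.

def expected_vilt_size(height, width, shortest_edge=30, size_divisor=2):
    # scale the shortest edge to `shortest_edge`
    scale = shortest_edge / min(height, width)
    if height < width:
        newh, neww = shortest_edge, scale * width
    else:
        newh, neww = scale * height, shortest_edge
    # cap the longest edge
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # floor both sides to a multiple of `size_divisor`
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_vilt_size(60, 90))  # (30, 44)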
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import ViltImageProcessor class ViltImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, size_divisor=2, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.size_divisor = size_divisor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def get_expected_values(self, image_inputs, batched=False): if not batched: size = self.size["shortest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ViltImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViltImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ViltImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor")) def 
test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 30}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42})
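For completeness, a hedged usage sketch of the processor itself; the size and size_divisor values are illustrative choices rather than values taken from the tests, and the random array stands in for a real image. Both output sides should come out as multiples of size_divisor.

import numpy as np
from transformers import ViltImageProcessor

processor = ViltImageProcessor(size={"shortest_edge": 384}, size_divisor=32)
image = np.random.randint(0, 256, (512, 640, 3), dtype=np.uint8)  # placeholder image

pixel_values = processor(images=image, return_tensors="np").pixel_values
_, _, height, width = pixel_values.shape
assert height % 32 == 0 and width % 32 == 0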
Copyright 2021 HuggingFace Inc. team, Apache License 2.0. Tests for the Flax VisionEncoderDecoder model. The checks make the decoder inputs a different shape from the encoder inputs to harden the test, reuse token-id fallbacks copied from the generation utilities (GPT-2 has no pad_token_id and BERT has no bos token id, so eos/pad ids are used instead), and follow test_modeling_encoder_decoder.py for the PyTorch-Flax equivalence tests: encoder_hidden_states is not used in the model call, the attention mask is adjusted so no sequence is left with nothing to attend to once combined with the causal mask, use_cache is disabled because Flax models do not return a cache by default, and equivalence is verified both without and with the enc_to_dec_proj projection (which also makes sure cross-attention layers are added). A slow integration test verifies the logits of a ViT-GPT2 captioning checkpoint on an image of cute cats and expects the caption "a cat laying on top of a couch next to another cat".
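A minimal sketch of the PyTorch-to-Flax and Flax-to-PyTorch weight round trip that these equivalence checks rely on; the tiny ViT and GPT-2 configs are made up for illustration, while the conversion helpers are the same ones the test file imports.

from transformers import (
    FlaxVisionEncoderDecoderModel,
    GPT2Config,
    VisionEncoderDecoderConfig,
    VisionEncoderDecoderModel,
    ViTConfig,
)
from transformers.modeling_flax_pytorch_utils import (
    convert_pytorch_state_dict_to_flax,
    load_flax_weights_in_pytorch_model,
)

encoder_config = ViTConfig(
    hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37, image_size=30, patch_size=2
)
decoder_config = GPT2Config(n_embd=32, n_layer=2, n_head=2, add_cross_attention=True)
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)

pt_model = VisionEncoderDecoderModel(config)
fx_model = FlaxVisionEncoderDecoderModel(config)

# PyTorch -> Flax: convert the state dict and attach it as the Flax params
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

# Flax -> PyTorch: load the Flax params back into the PyTorch model
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)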
import tempfile import unittest import numpy as np from transformers import is_flax_available, is_torch_available, is_vision_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_vision, slow, torch_device from ...test_modeling_flax_common import floats_tensor, ids_tensor from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( AutoTokenizer, FlaxGPT2LMHeadModel, FlaxVisionEncoderDecoderModel, FlaxViTModel, VisionEncoderDecoderConfig, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionEncoderDecoderModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor @require_flax class FlaxEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_from_pretrained( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_save_and_load( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, 
decoder_attention_mask=decoder_attention_mask,
        )
        out_2 = np.array(outputs[0])
        out_2[np.isnan(out_2)] = 0

        with tempfile.TemporaryDirectory() as tmpdirname:
            enc_dec_model.save_pretrained(tmpdirname)
            enc_dec_model = FlaxVisionEncoderDecoderModel.from_pretrained(tmpdirname)

            after_outputs = enc_dec_model(
                pixel_values=pixel_values,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            out_1 = np.array(after_outputs[0])
            out_1[np.isnan(out_1)] = 0
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

    def check_encoder_decoder_model_output_attentions(
        self,
        config,
        pixel_values,
        encoder_hidden_states,
        decoder_config,
        decoder_input_ids,
        decoder_attention_mask,
        **kwargs,
    ):
        # make the decoder inputs a different shape from the encoder inputs to harden the test
        decoder_input_ids = decoder_input_ids[:, :-1]
        decoder_attention_mask = decoder_attention_mask[:, :-1]
        encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
        kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model}
        enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)

        outputs_encoder_decoder = enc_dec_model(
            pixel_values=pixel_values,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            output_attentions=True,
        )

        encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
        self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
        self.assertEqual(encoder_attentions[0].shape[-3:-2], (config.num_attention_heads,))

        decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
        num_decoder_layers = (
            decoder_config.num_decoder_layers
            if hasattr(decoder_config, "num_decoder_layers")
            else decoder_config.num_hidden_layers
        )
        self.assertEqual(len(decoder_attentions), num_decoder_layers)
        self.assertEqual(
            decoder_attentions[0].shape[-3:],
            (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
        )

        cross_attentions = outputs_encoder_decoder["cross_attentions"]
        self.assertEqual(len(cross_attentions), num_decoder_layers)
        cross_attention_input_seq_len = decoder_input_ids.shape[-1] * (
            1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0)
        )
        self.assertEqual(
            cross_attentions[0].shape[-3:-1],
            (decoder_config.num_attention_heads, cross_attention_input_seq_len),
        )

    def check_encoder_decoder_model_generate(self, pixel_values, config, decoder_config, **kwargs):
        encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
        kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model}
        enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)

        pad_token_id = enc_dec_model.config.decoder.pad_token_id
        eos_token_id = enc_dec_model.config.decoder.eos_token_id
        decoder_start_token_id = enc_dec_model.config.decoder.decoder_start_token_id

        # GPT-2 does not have a pad_token_id, so use eos_token_id instead
        if pad_token_id is None and eos_token_id is not None:
            pad_token_id = eos_token_id
        if decoder_start_token_id is None:
            decoder_start_token_id = enc_dec_model.config.decoder.bos_token_id
        # BERT does not have a bos token id, so use pad_token_id instead
        if decoder_start_token_id is None:
            decoder_start_token_id = pad_token_id

        generated_output = enc_dec_model.generate(
            pixel_values,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
        )
        generated_sequences = generated_output.sequences
        self.assertEqual(generated_sequences.shape, (pixel_values.shape[0],) + (decoder_config.max_length,))

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for
k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 1e-5) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 1e-5) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 1e-5) def check_equivalence_pt_to_flax(self, config, decoder_config, inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = VisionEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, config, decoder_config, inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = VisionEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=True) def test_save_and_load_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_save_and_load(**config_inputs_dict) def test_encoder_decoder_model_output_attentions(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**config_inputs_dict) def test_encoder_decoder_model_generate(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**config_inputs_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() 
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") inputs_dict = config_inputs_dict del inputs_dict["encoder_hidden_states"] batch_size = inputs_dict["decoder_attention_mask"].shape[0] inputs_dict["decoder_attention_mask"] = np.concatenate( [np.ones(shape=(batch_size, 1)), inputs_dict["decoder_attention_mask"][:, 1:]], axis=1 ) decoder_config.use_cache = False self.assertTrue(decoder_config.cross_attention_hidden_size is None) self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() pixel_values = floats_tensor( [ 13, model_2.config.encoder.num_channels, model_2.config.encoder.image_size, model_2.config.encoder.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) outputs = model_2( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxVisionEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_flax class FlaxViT2GPT2EncoderDecoderModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxViTModel(config) decoder_model = FlaxGPT2LMHeadModel(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxViTModelTester(self, batch_size=13) model_tester_decoder = FlaxGPT2ModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, pixel_values) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } def get_pretrained_model(self): return FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) @require_flax class FlaxVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) def _check_configuration_tie(self, model): module = 
model.module.bind(model.params) assert id(module.decoder.config) == id(model.config.decoder) assert id(module.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class FlaxViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = FlaxVisionEncoderDecoderModel.from_pretrained(loc) img = prepare_img() pixel_values = image_processor(images=img, return_tensors="np").pixel_values decoder_input_ids = np.array([[model.config.decoder_start_token_id]]) logits = model(pixel_values, decoder_input_ids)[0] logits = np.array(logits) expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705837, -30.639936, -31.41905, -39.01204, -38.38698, -34.887215, -33.29087, -35.684475, -38.50852, -36.124676, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate(pixel_values, max_length=16, num_beams=4) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds, outputs.scores preds, scores = generate_step(pixel_values) EXPECTED_SCORES = np.array([-0.59563464]) scores = np.array(scores) max_diff = np.amax(np.abs(scores - EXPECTED_SCORES)) self.assertLessEqual(max_diff, 1e-4) self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
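For reference, here is a hedged end-to-end captioning sketch using the same public checkpoint as the integration test above; the image path is the fixture used elsewhere in these tests and would be replaced by your own image in practice.

from PIL import Image
from transformers import AutoTokenizer, FlaxVisionEncoderDecoderModel, ViTImageProcessor

loc = "ydshieh/vit-gpt2-coco-en"
image_processor = ViTImageProcessor.from_pretrained(loc)
tokenizer = AutoTokenizer.from_pretrained(loc)
model = FlaxVisionEncoderDecoderModel.from_pretrained(loc)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="np").pixel_values

outputs = model.generate(pixel_values, max_length=16, num_beams=4)
caption = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0].strip()
print(caption)  # expected to read like "a cat laying on top of a couch next to another cat"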
Copyright 2022 HuggingFace Inc. team, Apache License 2.0. Testing suite for the TensorFlow VisionEncoderDecoder model. The mixin builds the model from encoder/decoder configs and from pretrained sub-models, checks save/load round trips, labels (making sure a loss exists), attention outputs (with the decoder inputs reshaped to a different length to harden the test), and generation up to max_length (BERT has no bos token id, so pad_token_id is used as the decoder start token). check_pt_tf_outputs recursively compares PyTorch and TensorFlow outputs (ModelOutput objects, tuples/lists, and tensors) within a small tolerance, replacing NaN values by 0 and reducing the TF loss to a scalar to match PyTorch; the PT/TF cross tests request all attentions and hidden states, make sure no attention-mask row is all zeros, and disable the cache. Because most checkpoints have no pretrained cross-attention layers (they are randomly initialized even with from_pretrained), the cross tests save randomly initialized ViT-GPT2 models without load_weight_prefix and reload them with from_pretrained, so that save_pretrained followed by from_pretrained gives the same result (see https://github.com/huggingface/transformers/pull/14016); add_cross_attention=True must be specified for the decoder. A slow integration test verifies the logits of a ViT-GPT2 captioning checkpoint on an image of cute cats and expects the caption "a cat laying on top of a couch next to another cat".
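Before the TensorFlow test code, a hedged usage sketch of the model class it exercises: assembling an encoder-decoder from separately pretrained ViT and GPT-2 checkpoints and generating token ids. The checkpoint names mirror the ones used in these tests, the availability of TensorFlow weights for them is an assumption here, and because the cross-attention weights are freshly initialized the generated ids are not meaningful captions.

import numpy as np
from transformers import TFVisionEncoderDecoderModel, ViTImageProcessor

model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "gpt2"
)
# GPT-2 has no dedicated decoder-start or pad tokens, so reuse its BOS/EOS ids
model.config.decoder_start_token_id = model.config.decoder.bos_token_id
model.config.pad_token_id = model.config.decoder.eos_token_id

image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
images = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)]  # placeholder input
pixel_values = image_processor(images=images, return_tensors="tf").pixel_values

generated_ids = model.generate(pixel_values, max_length=16)
print(generated_ids.shape)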
from __future__ import annotations import copy import os import tempfile import unittest import numpy as np from transformers import is_tf_available, is_torch_available, is_vision_available from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_vision, slow, torch_device, ) from transformers.utils.generic import ModelOutput from ...test_modeling_tf_common import floats_tensor, ids_tensor from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): import tensorflow as tf from transformers import ( AutoConfig, AutoImageProcessor, AutoTokenizer, TFAutoModel, TFAutoModelForCausalLM, TFGPT2LMHeadModel, TFVisionEncoderDecoderModel, TFViTModel, VisionEncoderDecoderConfig, ) from transformers.modeling_tf_outputs import TFBaseModelOutput if is_torch_available(): import torch from transformers import GPT2LMHeadModel, VisionEncoderDecoderModel, ViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor @require_tf class TFVisionEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = TFVisionEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_hidden_states) outputs_encoder_decoder = enc_dec_model( pixel_values=None, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) 
self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_from_pretrained( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, return_dict=True, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_save_and_load( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_labels( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, labels, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, kwargs=kwargs, ) self.assertIn("loss", outputs_encoder_decoder) batch_size, seq_len = decoder_input_ids.shape expected_shape = (batch_size, seq_len, decoder_config.vocab_size) self.assertEqual(outputs_encoder_decoder["logits"].shape, expected_shape) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_output_attentions( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, 
decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, kwargs=kwargs, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual(encoder_attentions[0].shape[-3:-2], (config.num_attention_heads,)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:-1], (decoder_config.num_attention_heads, cross_attention_input_seq_len), ) def check_encoder_decoder_model_generate(self, pixel_values, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None generated_output = enc_dec_model.generate( pixel_values, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual( tuple(generated_output.shape.as_list()), (pixel_values.shape[0],) + (decoder_config.max_length,) ) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): 
self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." ) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } pt_model.to(torch_device) pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) encoder_decoder_config.output_hidden_states = True encoder_decoder_config.output_attentions = True pt_model = VisionEncoderDecoderModel(encoder_decoder_config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) encoder_decoder_config.output_hidden_states = True encoder_decoder_config.output_attentions = True tf_model = TFVisionEncoderDecoderModel(encoder_decoder_config) tf_model(**tf_inputs_dict) 
with tempfile.TemporaryDirectory() as tmpdirname: tf_model.save_pretrained(tmpdirname, safe_serialization=False) pt_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def test_encoder_decoder_model(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=True) def test_save_and_load_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_save_and_load(**config_inputs_dict) def test_encoder_decoder_model_labels(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_labels(**config_inputs_dict) def test_encoder_decoder_model_output_attentions(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**config_inputs_dict) def test_encoder_decoder_model_generate(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**config_inputs_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).") @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() labels = config_inputs_dict.pop("decoder_token_labels") arg_names = [ "config", "pixel_values", "decoder_config", "decoder_input_ids", "decoder_attention_mask", "encoder_hidden_states", ] config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names} config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") config.output_hidden_states = True decoder_config.output_hidden_states = True config.output_attentions = True decoder_config.output_attentions = True tf_inputs_dict = config_inputs_dict del tf_inputs_dict["encoder_hidden_states"] for k in ["decoder_attention_mask"]: attention_mask = tf_inputs_dict[k] attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) tf_inputs_dict[k] = attention_mask tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict) tf_inputs_dict_with_labels["labels"] = labels self.assertTrue(decoder_config.cross_attention_hidden_size is None) self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels) decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) 
self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() pixel_values = floats_tensor( [ 13, model_2.config.encoder.num_channels, model_2.config.encoder.image_size, model_2.config.encoder.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) outputs = model_2( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFViT2GPT2EncoderDecoderModelTest(TFVisionEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFViTModel(config, name="encoder") decoder_model = TFGPT2LMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = TFViTModelTester(self, batch_size=13) model_tester_decoder = TFGPT2ModelTester(self) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, pixel_values, labels) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, decoder_head_mask, decoder_token_type_ids, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs decoder_config.add_cross_attention = True decoder_config.use_cache = False return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "decoder_token_labels": decoder_token_labels, "encoder_hidden_states": encoder_hidden_states, "labels": decoder_token_labels, } @require_tf class TFVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_decoder_config(self): config = AutoConfig.from_pretrained("gpt2") config.is_decoder = True config.add_cross_attention = True return config def get_encoderdecoder_model(self): return TFVisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en") def get_encoder_decoder_models(self): encoder_model = TFViTModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") decoder_model = TFGPT2LMHeadModel.from_pretrained("gpt2", config=self.get_decoder_config(), name="decoder") return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): assert id(model.decoder.config) == id(model.config.decoder) assert id(model.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) model = TFVisionEncoderDecoderModel(**self.get_encoder_decoder_models()) 
self._check_configuration_tie(model) model = self.get_encoderdecoder_model() self._check_configuration_tie(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFVisionEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k") decoder_config = AutoConfig.from_pretrained("gpt2", is_decoder=True, add_cross_attention=True) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): encoder_config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-vit") decoder_config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-gpt2", is_decoder=True, add_cross_attention=True ) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def test_encoder_decoder_save_load_from_encoder_decoder(self): config = self.get_encoder_decoder_config_small() encoder = TFViTModel(config.encoder) encoder.build() decoder = TFGPT2LMHeadModel(config.decoder) decoder.build() encoder_decoder_orig = TFVisionEncoderDecoderModel(encoder=encoder, decoder=decoder) pixel_values = floats_tensor( [ 13, encoder.config.num_channels, encoder.config.image_size, encoder.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder.config.vocab_size) logits_orig = encoder_decoder_orig(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname: encoder_path = os.path.join(tmp_dirname, "encoder") decoder_path = os.path.join(tmp_dirname, "decoder") encoder.save_pretrained(encoder_path) decoder.save_pretrained(decoder_path) encoder_decoder = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_path, decoder_path) logits_1 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits self.assertTrue(logits_orig.numpy().sum() - logits_1.numpy().sum() < 1e-3) max_diff = np.max(np.abs(logits_1.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder.save_pretrained(tmp_dirname) encoder_decoder = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_2 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) @require_torch @is_pt_tf_cross_test def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): config = self.get_encoder_decoder_config_small() encoder_pt = ViTModel(config.encoder).to(torch_device).eval() decoder_pt = GPT2LMHeadModel(config.decoder).to(torch_device).eval() encoder_decoder_pt = VisionEncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval() pixel_values = floats_tensor( [ 13, encoder_pt.config.num_channels, encoder_pt.config.image_size, encoder_pt.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size) pt_pixel_values = torch.tensor(pixel_values.numpy(), device=torch_device, dtype=torch.float) pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long) logits_pt = encoder_decoder_pt(pixel_values=pt_pixel_values, decoder_input_ids=pt_decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname_1, 
tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( tmp_dirname_1, tmp_dirname_2 ) logits_tf = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) @require_vision @slow def test_encoder_decoder_from_pretrained(self): load_weight_prefix = TFVisionEncoderDecoderModel.load_weight_prefix config = self.get_encoder_decoder_config() image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2") img = prepare_img() pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: encoder = TFAutoModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") decoder = TFAutoModelForCausalLM.from_pretrained( "gpt2", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") encoder.save_pretrained(pretrained_encoder_dir) decoder.save_pretrained(pretrained_decoder_dir) del encoder del decoder enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder_dir, pretrained_decoder_dir, ) enc_dec_model.save_pretrained(tmp_dirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del enc_dec_model encoder = TFAutoModel.from_pretrained( pretrained_encoder_dir, load_weight_prefix=load_weight_prefix, name="encoder" ) decoder = TFAutoModelForCausalLM.from_pretrained( pretrained_decoder_dir, load_weight_prefix=load_weight_prefix, name="decoder" ) enc_dec_model = TFVisionEncoderDecoderModel(config=config, encoder=encoder, decoder=decoder) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_init = output.loss max_diff = np.max(np.abs(loss_pretrained - loss_init)) expected_diff = 0.0 self.assertAlmostEqual(max_diff, expected_diff, places=4) @require_vision @require_tf class TFViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = TFVisionEncoderDecoderModel.from_pretrained(loc) img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]]) logits = model(pixel_values, 
decoder_input_ids)[0].numpy() expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705807, -30.639929, -31.41903, -39.012012, -38.38696, -34.887207, -33.290855, -35.68447, -38.508484, -36.124645, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds preds = generate_step(pixel_values) self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
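

# Minimal usage sketch of the API exercised by TFViT2GPT2ModelIntegrationTest.test_inference_coco_en
# above: captioning an image with a TFVisionEncoderDecoderModel (ViT encoder + GPT-2 decoder).
# It assumes `transformers`, `tensorflow` and `Pillow` are installed, that the
# "ydshieh/vit-gpt2-coco-en" checkpoint can be downloaded from the Hub, and that the COCO
# fixture image used by the tests is available locally; it is an illustration, not part of the suite.
if __name__ == "__main__":
    from PIL import Image

    from transformers import AutoTokenizer, TFVisionEncoderDecoderModel, ViTImageProcessor

    checkpoint = "ydshieh/vit-gpt2-coco-en"
    image_processor = ViTImageProcessor.from_pretrained(checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = TFVisionEncoderDecoderModel.from_pretrained(checkpoint)

    # Preprocess the image, then let the GPT-2 decoder generate a caption with beam search,
    # mirroring the generate_step helper in the integration test.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
    outputs = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
    captions = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)
    print([caption.strip() for caption in captions])  # expected: a caption describing the two cats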
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch VisionTextDualEncoder model. """
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_flax class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = FlaxVisionTextDualEncoderModel(config) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0] max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-3) def 
check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = 
load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() outputs = model_2(**inputs) out_2 = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(**inputs) out_1 = after_outputs[0] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_flax class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = FlaxViTModel(vision_config) text_model = FlaxBertModel(text_config) return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = FlaxViTModelTester(self) bert_model_tester = FlaxBertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, 
model.config.vision_config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = FlaxCLIPVisionModel(vision_config) text_model = FlaxBertModel(text_config) return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = FlaxCLIPVisionModelTester(self) bert_model_tester = FlaxBertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np" ) outputs = model(**inputs) self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = np.array([[1.2284727, 0.3104122]]) self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
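

# Minimal usage sketch of the pattern the Flax mixin above asserts on: build a
# FlaxVisionTextDualEncoderModel from a tiny ViT vision tower and a tiny BERT text tower
# (as FlaxViTBertModelTest.get_pretrained_model_and_inputs does) and confirm both towers
# project into the shared `projection_dim` space. It assumes `transformers`, `flax` and
# `torch` are installed (the tiny checkpoints ship PyTorch weights, hence the *_from_pt
# flags) plus Hub access; the random inputs only illustrate the expected shapes.
if __name__ == "__main__":
    import numpy as np

    from transformers import FlaxVisionTextDualEncoderModel

    model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit",
        "hf-internal-testing/tiny-bert",
        vision_from_pt=True,
        text_from_pt=True,
    )

    vision_cfg = model.config.vision_config
    batch_size = 13
    # Same input shapes the tests use: NCHW images and token sequences of length 4.
    pixel_values = np.random.randn(
        batch_size, vision_cfg.num_channels, vision_cfg.image_size, vision_cfg.image_size
    ).astype(np.float32)
    input_ids = np.random.randint(1, model.config.text_config.vocab_size, size=(batch_size, 4))
    attention_mask = np.ones_like(input_ids)

    output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
    print(output.text_embeds.shape)   # (13, projection_dim)
    print(output.image_embeds.shape)  # (13, projection_dim)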
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch VisionTextDualEncoder model. """


from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


# Inspired by
# https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py
# From PyTorch internals
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all
        # so let's just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_tf class TFVisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = TFVisionTextDualEncoderModel(config) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = 
TFVisionTextDualEncoderModel.from_pretrained(tmpdirname) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() outputs = model_2(**inputs) out_2 = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(**inputs) out_1 = after_outputs[0].numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, 
text_config): vision_model = TFViTModel(vision_config, name="vision_model") text_model = TFBertModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = TFViTModelTester(self) bert_model_tester = TFBertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = TFDeiTModel(vision_config, name="vision_model") text_model = TFRobertaModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = TFDeiTModelTester(self) bert_model_tester = TFRobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": 
input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = TFCLIPVisionModel(vision_config, name="vision_model") text_model = TFBertModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = TFCLIPVisionModelTester(self) bert_model_tester = TFBertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True ) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np" ) outputs = model(**inputs) self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = np.array([[1.2284727, 0.3104122]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
Testing suite for the PyTorch VisionTextDualEncoder model. Copyright 2021 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. The test code follows.
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..clip.test_modeling_clip import CLIPVisionModelTester from ..deit.test_modeling_deit import DeiTModelTester from ..roberta.test_modeling_roberta import RobertaModelTester from ..vit.test_modeling_vit import ViTModelTester if is_torch_available(): import torch from transformers import ( BertModel, CLIPVisionModel, DeiTModel, RobertaModel, VisionTextDualEncoderConfig, VisionTextDualEncoderModel, ViTModel, ) if is_flax_available(): from transformers import FlaxVisionTextDualEncoderModel from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_torch class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = VisionTextDualEncoderModel(config) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = VisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = 
VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() with torch.no_grad(): output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = VisionTextDualEncoderModel.from_pretrained(tmpdirname).eval() model.to(torch_device) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].cpu().numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_equivalence(self, pt_model, fx_model, input_ids, attention_mask, pixel_values, **kwargs): pt_model.to(torch_device) pt_model.eval() inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values} pt_inputs = inputs_dict flax_inputs = {k: v.numpy() for k, v in pt_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), 
len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = VisionTextDualEncoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class ViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = 
ViTModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = ViTModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_torch class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-deit", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = DeiTModel(vision_config).eval() text_model = RobertaModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = DeiTModelTester(self) bert_model_tester = RobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": 
token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } def test_pt_flax_equivalence(self): pass @require_torch class CLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = CLIPVisionModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = CLIPVisionModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_torch class VisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="pt" ) outputs = model(**inputs) self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = torch.tensor([[1.2284727, 0.3104122]]) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
Testing suite for the PyTorch VisualBERT model. Copyright 2021 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. The test code follows.

import copy
import unittest

from transformers import VisualBertConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        VisualBertForMultipleChoice,
        VisualBertForPreTraining,
        VisualBertForQuestionAnswering,
        VisualBertForRegionToPhraseAlignment,
        VisualBertForVisualReasoning,
        VisualBertModel,
    )
    from transformers.models.visual_bert.modeling_visual_bert import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST


class VisualBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        visual_seq_length=5,
        is_training=True,
        use_attention_mask=True,
        use_visual_attention_mask=True,
        use_token_type_ids=True,
        use_visual_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        visual_embedding_dim=20,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.visual_seq_length = visual_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_visual_attention_mask = use_visual_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_visual_token_type_ids = use_visual_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.visual_embedding_dim = visual_embedding_dim
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_config(self):
        return VisualBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            visual_embedding_dim=self.visual_embedding_dim,
            num_labels=self.num_labels,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim])

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)

        visual_attention_mask = None
        if self.use_visual_attention_mask:
            visual_attention_mask = torch.ones(
                (self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device
            )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        visual_token_type_ids = None
        if self.use_visual_token_type_ids:
            visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size)

        config = self.get_config()
        return config, {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "visual_embeds": visual_embeds,
            "visual_token_type_ids": visual_token_type_ids,
            "visual_attention_mask": visual_attention_mask,
        }

    def prepare_config_and_inputs_for_pretraining(self):
        masked_lm_labels = None
        sentence_image_labels = None

        if self.use_labels:
            masked_lm_labels = ids_tensor(
                [self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size
            )
            sentence_image_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config, input_dict = self.prepare_config_and_inputs_for_common()

        input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels})

        return config, input_dict

    def prepare_config_and_inputs_for_multiple_choice(self):
        input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size)
        visual_embeds = floats_tensor(
            [self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim]
        )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = torch.ones(
                (self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device
            )

        visual_attention_mask = None
        if self.use_visual_attention_mask:
            visual_attention_mask = torch.ones(
                (self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device
            )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size)

        visual_token_type_ids = None
        if self.use_visual_token_type_ids:
            visual_token_type_ids = ids_tensor(
                [self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size
            )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "visual_embeds": visual_embeds,
            "visual_token_type_ids": visual_token_type_ids,
            "visual_attention_mask": visual_attention_mask,
            "labels": labels,
        }

    def prepare_config_and_inputs_for_vqa(self):
        vqa_labels = None
        if self.use_labels:
            vqa_labels = floats_tensor([self.batch_size, self.num_labels])

        config, input_dict = self.prepare_config_and_inputs_for_common()

        input_dict.update({"labels": vqa_labels})
        return config, input_dict

    def prepare_config_and_inputs_for_nlvr(self):
        nlvr_labels = None
        if self.use_labels:
            nlvr_labels = ids_tensor([self.batch_size], self.num_labels)

        config, input_dict = self.prepare_config_and_inputs_for_common()

        input_dict.update({"labels": nlvr_labels})
        return config, input_dict

    def prepare_config_and_inputs_for_flickr(self):
        region_to_phrase_position = torch.cat(
            (
                ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length),
                torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1,
            ),
            dim=-1,
        )
        flickr_labels = None
        if self.use_labels:
            flickr_labels = floats_tensor(
                [self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length]
            )

        config, input_dict = self.prepare_config_and_inputs_for_common()

        input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": flickr_labels})
        return config, input_dict

    def create_and_check_model(self, config, input_dict):
        model = VisualBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.seq_length + self.visual_seq_length, self.hidden_size),
        )

    def create_and_check_for_pretraining(self, config, input_dict):
        model = VisualBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(
            result.prediction_logits.shape,
            (self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size),
        )

    def create_and_check_for_vqa(self, config, input_dict):
        model = VisualBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_dict):
        model = VisualBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_nlvr(self, config, input_dict):
        model = VisualBertForVisualReasoning(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_flickr(self, config, input_dict):
        model = VisualBertForRegionToPhraseAlignment(config=config)
        model.to(torch_device)
        model.eval()
        result = model(**input_dict)
        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length),
        )


@require_torch
class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            VisualBertModel,
            VisualBertForMultipleChoice,
            VisualBertForVisualReasoning,
            VisualBertForRegionToPhraseAlignment,
            VisualBertForQuestionAnswering,
            VisualBertForPreTraining,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {}
    test_torchscript = False
    test_pruning = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VisualBertForMultipleChoice:
            for key in inputs_dict.keys():
                value = inputs_dict[key]
                if isinstance(value, torch.Tensor) and value.ndim > 1:
                    if key != "visual_embeds":
                        inputs_dict[key] = (
                            inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                        )
                    else:
                        inputs_dict[key] = (
                            inputs_dict[key]
                            .unsqueeze(1)
                            .expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim)
                            .contiguous()
                        )

        elif model_class == VisualBertForRegionToPhraseAlignment:
            total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
            batch_size = self.model_tester.batch_size
            inputs_dict["region_to_phrase_position"] = torch.zeros(
                (batch_size, total_length), dtype=torch.long, device=torch_device
            )

        if return_labels:
            if model_class == VisualBertForMultipleChoice:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class == VisualBertForPreTraining:
                total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
                batch_size = self.model_tester.batch_size
                inputs_dict["labels"] = torch.zeros(
                    (batch_size, total_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_image_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

            # Flickr expects float labels
            elif model_class == VisualBertForRegionToPhraseAlignment:
                batch_size = self.model_tester.batch_size
                total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length

                inputs_dict["labels"] = torch.ones(
                    (batch_size, total_length, self.model_tester.visual_seq_length),
                    dtype=torch.float,
                    device=torch_device,
                )

            # VQA expects float labels
            elif model_class == VisualBertForQuestionAnswering:
                inputs_dict["labels"] = torch.ones(
                    (self.model_tester.batch_size, self.model_tester.num_labels),
                    dtype=torch.float,
                    device=torch_device,
                )

            elif model_class == VisualBertForVisualReasoning:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = VisualBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config returndict true seqlen getattrself modeltester seqlength none visualseqlen getattrself modeltester visualseqlength none encoderseqlength seqlen if seqlen is not none else 0 visualseqlen if visualseqlen is not none else 0 encoderkeylength getattrself modeltester keylength encoderseqlength chunklength getattrself modeltester chunklength none if chunklength is not none and hasattrself modeltester numhashes encoderseqlength encoderseqlength self modeltester numhashes for modelclass in self allmodelclasses inputsdictoutputattentions true inputsdictoutputhiddenstates false config returndict true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers check that outputattentions also work using config del inputsdictoutputattentions config outputattentions true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers if chunklength is not none self assertlistequal listattentions0 shape4 self modeltester numattentionheads encoderseqlength chunklength encoderkeylength else self assertlistequal listattentions0 shape3 self modeltester numattentionheads encoderseqlength encoderkeylength outlen lenoutputs check attention is always last and order is fine inputsdictoutputattentions true inputsdictoutputhiddenstates true model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass if hasattrself modeltester numhiddenstatestypes addedhiddenstates self modeltester numhiddenstatestypes elif self isencoderdecoder addedhiddenstates 2 else addedhiddenstates 1 self assertequaloutlen addedhiddenstates lenoutputs selfattentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenselfattentions self modeltester numhiddenlayers if chunklength is not none self assertlistequal listselfattentions0 shape4 self modeltester numattentionheads encoderseqlength chunklength encoderkeylength else self assertlistequal listselfattentions0 shape3 self modeltester numattentionheads encoderseqlength encoderkeylength def testhiddenstatesoutputself def checkhiddenstatesoutputinputsdict config modelclass model modelclassconfig model totorchdevice model eval with torch nograd outputs modelself prepareforclassinputsdict modelclass hiddenstates outputs encoderhiddenstates if config isencoderdecoder else outputs hiddenstates expectednumlayers getattr self modeltester expectednumhiddenlayers self modeltester numhiddenlayers 1 self assertequallenhiddenstates expectednumlayers if hasattrself modeltester encoderseqlength seqlength self modeltester encoderseqlength if hasattrself modeltester chunklength and self modeltester chunklength 1 seqlength seqlength self modeltester chunklength else seqlength self modeltester seqlength self modeltester visualseqlength self assertlistequal listhiddenstates0 shape2 seqlength self modeltester hiddensize config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses inputsdictoutputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass check that outputhiddenstates also work using config del 
inputsdictoutputhiddenstates config outputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputsforcommon self modeltester createandcheckmodelconfigandinputs def testmodelvariousembeddingsself configandinputs self modeltester prepareconfigandinputsforcommon for type in absolute relativekey relativekeyquery configandinputs0 positionembeddingtype type self modeltester createandcheckmodelconfigandinputs def testmodelforpretrainingself configandinputs self modeltester prepareconfigandinputsforpretraining self modeltester createandcheckforpretrainingconfigandinputs def testmodelforvqaself configandinputs self modeltester prepareconfigandinputsforvqa self modeltester createandcheckforvqaconfigandinputs def testmodelfornlvrself configandinputs self modeltester prepareconfigandinputsfornlvr self modeltester createandcheckfornlvrconfigandinputs def testmodelformultiplechoiceself configandinputs self modeltester prepareconfigandinputsformultiplechoice self modeltester createandcheckformultiplechoiceconfigandinputs def testmodelforflickrself configandinputs self modeltester prepareconfigandinputsforflickr self modeltester createandcheckforflickrconfigandinputs slow def testmodelfrompretrainedself for modelname in visualbertpretrainedmodelarchivelist 1 model visualbertmodel frompretrainedmodelname self assertisnotnonemodel unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantself pass unittest skip reasonthis architecure seem to not compute gradients properly when using gc check https github comhuggingfacetransformerspull27124 def testtraininggradientcheckpointingusereentrantfalseself pass requiretorch class visualbertmodelintegrationtestunittest testcase slow def testinferencevqacocopreself model visualbertforpretraining frompretraineduclanlpvisualbertvqacocopre inputids torch tensor1 2 3 4 5 6 dtypetorch long reshape1 1 tokentypeids torch tensor0 0 0 1 1 1 dtypetorch long reshape1 1 visualembeds torch onessize1 10 2048 dtypetorch float32 0 5 visualtokentypeids torch onessize1 10 dtypetorch long attentionmask torch tensor1 6 reshape1 1 visualattentionmask torch tensor1 10 reshape1 1 with torch nograd output model inputidsinputids attentionmaskattentionmask tokentypeidstokentypeids visualembedsvisualembeds visualattentionmaskvisualattentionmask visualtokentypeidsvisualtokentypeids vocabsize 30522 expectedshape torch size1 16 vocabsize self assertequaloutput predictionlogits shape expectedshape expectedslice torch tensor 5 1858 5 1903 4 9142 6 2214 5 9238 5 8381 6 3027 5 9939 5 9297 self asserttruetorch allcloseoutput predictionlogits 3 3 expectedslice atol1e4 expectedshape2 torch size1 2 self assertequaloutput seqrelationshiplogits shape expectedshape2 expectedslice2 torch tensor0 7393 0 1754 self asserttruetorch allcloseoutput seqrelationshiplogits expectedslice2 atol1e4 slow def testinferencevqaself model visualbertforquestionanswering frompretraineduclanlpvisualbertvqa inputids torch tensor1 2 3 4 5 6 dtypetorch long reshape1 1 tokentypeids torch tensor0 0 0 1 1 1 dtypetorch long reshape1 1 visualembeds torch onessize1 10 2048 
dtypetorch float32 0 5 visualtokentypeids torch onessize1 10 dtypetorch long attentionmask torch tensor1 6 reshape1 1 visualattentionmask torch tensor1 10 reshape1 1 with torch nograd output model inputidsinputids attentionmaskattentionmask tokentypeidstokentypeids visualembedsvisualembeds visualattentionmaskvisualattentionmask visualtokentypeidsvisualtokentypeids vocabsize 30522 expectedshape torch size1 3129 self assertequaloutput logits shape expectedshape expectedslice torch tensor 8 9898 3 0803 1 8016 2 4542 8 3420 2 0224 3 3124 4 4139 3 1491 3 8997 self asserttruetorch allcloseoutput logits 10 expectedslice atol1e4 slow def testinferencenlvrself model visualbertforvisualreasoning frompretraineduclanlpvisualbertnlvr2 inputids torch tensor1 2 3 4 5 6 dtypetorch long reshape1 1 tokentypeids torch tensor0 0 0 1 1 1 dtypetorch long reshape1 1 visualembeds torch onessize1 10 1024 dtypetorch float32 0 5 visualtokentypeids torch onessize1 10 dtypetorch long attentionmask torch tensor1 6 reshape1 1 visualattentionmask torch tensor1 10 reshape1 1 with torch nograd output model inputidsinputids attentionmaskattentionmask tokentypeidstokentypeids visualembedsvisualembeds visualattentionmaskvisualattentionmask visualtokentypeidsvisualtokentypeids vocabsize 30522 expectedshape torch size1 2 self assertequaloutput logits shape expectedshape expectedslice torch tensor1 1436 0 8900 self asserttruetorch allcloseoutput logits expectedslice atol1e4 slow def testinferencevcrself model visualbertformultiplechoice frompretraineduclanlpvisualbertvcr inputids torch tensor1 2 3 4 5 6 for i in range4 dtypetorch long attentionmask torch oneslikeinputids tokentypeids torch oneslikeinputids visualembeds torch onessize1 4 10 512 dtypetorch float32 0 5 visualtokentypeids torch onessize1 4 10 dtypetorch long visualattentionmask torch oneslikevisualtokentypeids with torch nograd output model inputidsinputids attentionmaskattentionmask tokentypeidstokentypeids visualembedsvisualembeds visualattentionmaskvisualattentionmask visualtokentypeidsvisualtokentypeids vocabsize 30522 expectedshape torch size1 4 self assertequaloutput logits shape expectedshape expectedslice torch tensor7 7697 7 7697 7 7697 7 7697 self asserttruetorch allcloseoutput logits expectedslice atol1e4 coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch visualbert model flickr expects float labels vqa expects float labels check that output_attentions also work using config check attention is always last and order is fine check that output_hidden_states also work using config vocab_size 30522 vocab_size 30522 vocab_size 30522
import copy import unittest from transformers import VisualBertConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, VisualBertModel, ) from transformers.models.visual_bert.modeling_visual_bert import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST class VisualBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, visual_seq_length=5, is_training=True, use_attention_mask=True, use_visual_attention_mask=True, use_token_type_ids=True, use_visual_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, visual_embedding_dim=20, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.visual_seq_length = visual_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_visual_attention_mask = use_visual_attention_mask self.use_token_type_ids = use_token_type_ids self.use_visual_token_type_ids = use_visual_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.visual_embedding_dim = visual_embedding_dim self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_config(self): return VisualBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, visual_embedding_dim=self.visual_embedding_dim, num_labels=self.num_labels, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim]) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if 
self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } def prepare_config_and_inputs_for_pretraining(self): masked_lm_labels = None sentence_image_labels = None if self.use_labels: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size) sentence_image_labels = ids_tensor( [self.batch_size], self.type_sequence_label_size, ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels}) return config, input_dict def prepare_config_and_inputs_for_multiple_choice(self): input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size) visual_embeds = floats_tensor( [self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim] ) attention_mask = None if self.use_attention_mask: attention_mask = torch.ones( (self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device ) visual_attention_mask = None if self.use_visual_attention_mask: visual_attention_mask = torch.ones( (self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size) visual_token_type_ids = None if self.use_visual_token_type_ids: visual_token_type_ids = ids_tensor( [self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, "labels": labels, } def prepare_config_and_inputs_for_vqa(self): vqa_labels = None if self.use_labels: vqa_labels = floats_tensor([self.batch_size, self.num_labels]) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": vqa_labels}) return config, input_dict def prepare_config_and_inputs_for_nlvr(self): nlvr_labels = None if self.use_labels: nlvr_labels = ids_tensor([self.batch_size], self.num_labels) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"labels": nlvr_labels}) return config, input_dict def prepare_config_and_inputs_for_flickr(self): region_to_phrase_position = torch.cat( ( ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length), torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1, ), dim=-1, ) flickr_labels = None if self.use_labels: flickr_labels = floats_tensor( [self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length] ) config, input_dict = self.prepare_config_and_inputs_for_common() input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": 
flickr_labels}) return config, input_dict def create_and_check_model(self, config, input_dict): model = VisualBertModel(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.hidden_size), ) def create_and_check_for_pretraining(self, config, input_dict): model = VisualBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.prediction_logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size), ) def create_and_check_for_vqa(self, config, input_dict): model = VisualBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice(self, config, input_dict): model = VisualBertForMultipleChoice(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_nlvr(self, config, input_dict): model = VisualBertForVisualReasoning(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_flickr(self, config, input_dict): model = VisualBertForRegionToPhraseAlignment(config=config) model.to(torch_device) model.eval() result = model(**input_dict) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length) ) @require_torch class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( VisualBertModel, VisualBertForMultipleChoice, VisualBertForVisualReasoning, VisualBertForRegionToPhraseAlignment, VisualBertForQuestionAnswering, VisualBertForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {} test_torchscript = False test_pruning = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VisualBertForMultipleChoice: for key in inputs_dict.keys(): value = inputs_dict[key] if isinstance(value, torch.Tensor) and value.ndim > 1: if key != "visual_embeds": inputs_dict[key] = ( inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() ) else: inputs_dict[key] = ( inputs_dict[key] .unsqueeze(1) .expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim) .contiguous() ) elif model_class == VisualBertForRegionToPhraseAlignment: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["region_to_phrase_position"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) if return_labels: if model_class == VisualBertForMultipleChoice: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == VisualBertForPreTraining: total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length batch_size = self.model_tester.batch_size inputs_dict["labels"] = torch.zeros( (batch_size, total_length), dtype=torch.long, device=torch_device, ) 
inputs_dict["sentence_image_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == VisualBertForRegionToPhraseAlignment: batch_size = self.model_tester.batch_size total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length inputs_dict["labels"] = torch.ones( ( batch_size, total_length, self.model_tester.visual_seq_length, ), dtype=torch.float, device=torch_device, ) elif model_class == VisualBertForQuestionAnswering: inputs_dict["labels"] = torch.ones( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.float, device=torch_device, ) elif model_class == VisualBertForVisualReasoning: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = VisualBertModelTester(self) self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) visual_seq_len = getattr(self.model_tester, "visual_seq_length", None) encoder_seq_length = (seq_len if seq_len is not None else 0) + ( visual_seq_len if visual_seq_len is not None else 0 ) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is 
not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_model_for_vqa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa() self.model_tester.create_and_check_for_vqa(*config_and_inputs) def test_model_for_nlvr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_nlvr() self.model_tester.create_and_check_for_nlvr(*config_and_inputs) def test_model_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_multiple_choice() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_model_for_flickr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr() self.model_tester.create_and_check_for_flickr(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VisualBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients 
properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class VisualBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_vqa_coco_pre(self): model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) vocab_size = 30522 expected_shape = torch.Size((1, 16, vocab_size)) self.assertEqual(output.prediction_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]] ) self.assertTrue(torch.allclose(output.prediction_logits[:, :3, :3], expected_slice, atol=1e-4)) expected_shape_2 = torch.Size((1, 2)) self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2) expected_slice_2 = torch.tensor([[0.7393, 0.1754]]) self.assertTrue(torch.allclose(output.seq_relationship_logits, expected_slice_2, atol=1e-4)) @slow def test_inference_vqa(self): model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) expected_shape = torch.Size((1, 3129)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]] ) self.assertTrue(torch.allclose(output.logits[:, :10], expected_slice, atol=1e-4)) @slow def test_inference_nlvr(self): model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, 
attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) expected_shape = torch.Size((1, 2)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-1.1436, 0.8900]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4)) @slow def test_inference_vcr(self): model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long) attention_mask = torch.ones_like(input_ids) token_type_ids = torch.ones_like(input_ids) visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) visual_attention_mask = torch.ones_like(visual_token_type_ids) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) expected_shape = torch.Size((1, 4)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]]) self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
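The integration tests above drive the VQA, NLVR2, and VCR checkpoints with hand-built tensors. For readers who want to try one of these checkpoints interactively, a minimal sketch along the same lines is shown below; it assumes the uclanlp/visualbert-vqa checkpoint used by the test and the usual bert-base-uncased tokenizer pairing, and it uses placeholder visual features, since in a real pipeline visual_embeds come from an external object detector that the library itself does not ship.

import torch
from transformers import BertTokenizer, VisualBertForQuestionAnswering

# Checkpoint taken from the integration test above.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

inputs = tokenizer("What is on the table?", return_tensors="pt")
# Placeholder region features; a real system would supply detector output of shape
# (batch, num_regions, 2048), matching the dimensionality used in the test.
visual_embeds = torch.ones(1, 10, 2048) * 0.5
inputs.update(
    {
        "visual_embeds": visual_embeds,
        "visual_token_type_ids": torch.ones((1, 10), dtype=torch.long),
        "visual_attention_mask": torch.ones((1, 10), dtype=torch.long),
    }
)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, 3129) scores over the VQA answer vocabulary
print(logits.argmax(-1))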
Testing suite for the ViT image processor.
import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ViTImageProcessor class ViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ViTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42})
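Outside the test harness, the same processor configuration can be exercised on an arbitrary image. The following is a minimal sketch assuming PIL, NumPy, and PyTorch are installed; the randomly generated image merely stands in for the usual test fixtures.

import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

# Same configuration as the tester above: resize to 18x18 and normalize with 0.5 mean/std.
processor = ViTImageProcessor(
    do_resize=True,
    size={"height": 18, "width": 18},
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 18, 18])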
Testing suite for the Flax ViT model. In ViT the sequence length equals the number of patches + 1 (the extra position is the [CLS] token); the common forward-signature and JIT tests are overridden because ViT takes pixel_values rather than input_ids.
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class FlaxViTModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) return config, pixel_values def create_and_check_model(self, config, pixel_values): model = FlaxViTModel(config=config) result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values): config.num_labels = self.type_sequence_label_size model = FlaxViTForImageClassification(config=config) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = FlaxViTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def setUp(self) -> None: self.model_tester = FlaxViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, 
has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(pixel_values, **kwargs): return model(pixel_values=pixel_values, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("google/vit-base-patch16-224") outputs = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(outputs)
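The slow test above only asserts that a forward pass on the released checkpoint returns something. For reference, a standalone sketch of that same call is shown below, assuming the google/vit-base-patch16-224 checkpoint: with 224x224 inputs and 16x16 patches the encoder sees (224 // 16) ** 2 + 1 = 197 positions.

import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
# Dummy channels-first image batch, exactly as in the slow test above.
pixel_values = np.ones((1, 3, 224, 224), dtype=np.float32)
outputs = model(pixel_values)
# (224 // 16) ** 2 patches plus the [CLS] token -> 197 positions, hidden size 768.
print(outputs.last_hidden_state.shape)  # (1, 197, 768)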
Testing suite for the TensorFlow ViT model. The sequence length again equals the number of patches + 1 for the [CLS] token; the tests also cover interpolate_pos_encoding with images of a different size than the one in the config, greyscale inputs, and a slow integration check that verifies the classification logits on a sample COCO image.
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class TFViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFViTModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) image_size = self.image_size // 2 pixel_values = pixel_values[:, :, :image_size, :image_size] result = model(pixel_values, interpolate_pos_encoding=True, training=False) seq_length = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFViTForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) image_size = self.image_size // 2 pixel_values = pixel_values[:, :, :image_size, :image_size] 
result = model(pixel_values, interpolate_pos_encoding=True, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = TFViTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ViT does not use inputs_embeds") def test_graph_mode_with_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFViTModel.from_pretrained("google/vit-base-patch16-224") self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") outputs = model(**inputs) expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.2744, 0.8215, -0.0836]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, 
atol=1e-4)
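# The tester above relies on ViT's sequence length being num_patches + 1 (the extra position is
# the [CLS] token) and recomputes it after halving the image for the interpolate_pos_encoding
# check. Below is a minimal standalone sketch of that arithmetic with the tester's defaults
# (image_size=30, patch_size=2); the helper name is illustrative and not part of the test suite.
def vit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_seq_length(30, 2) == 226  # full-size input used by TFViTModelTester
assert vit_seq_length(30 // 2, 2) == 50  # half-size input run with interpolate_pos_encoding=True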
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViT model. """
import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = ViTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) config.num_channels = 1 model = ViTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) 
result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = ViTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = 
torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device) image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) expected_shape = torch.Size((1, 3601, 384)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r"""A small test to make sure that inference works in half precision without any problem.""" model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) with torch.no_grad(): _ = model(pixel_values)
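# The interpolate_pos_encoding integration test above expects a (1, 3601, 384) last hidden state
# when facebook/dino-vits8 is run on a 480x480 image. A quick sanity sketch of where 3601 comes
# from, assuming the model's 8x8 patch size: (480 // 8) ** 2 = 3600 patches plus the [CLS] token.
# Pure arithmetic only; no checkpoint is downloaded here.
image_size, patch_size = 480, 8
expected_seq_length = (image_size // patch_size) ** 2 + 1
assert expected_seq_length == 3601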
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViT Hybrid model. """
import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class ViTHybridModelTester: def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.backbone_featmap_shape = backbone_featmap_shape num_patches = (self.image_size // 32) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTHybridModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTHybridForImageClassification(config) model.to(torch_device) model.eval() 
result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False model_split_percents = [0.5, 0.9] def setUp(self): self.model_tester = ViTHybridModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTHybridModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice 
= torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow @require_accelerate def test_accelerate_inference(self): image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384") model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
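# Unlike plain ViT, the hybrid tester derives seq_length from the backbone feature map rather than
# from patch_size: with the default output stride of 32, the 64px test image yields a 2x2 feature
# map, i.e. 4 patches plus the [CLS] token. A minimal check of that arithmetic, assuming the
# stride-32 default relied on by ViTHybridModelTester above.
image_size, backbone_output_stride = 64, 32
num_patches = (image_size // backbone_output_stride) ** 2
assert num_patches + 1 == 5  # seq_length used by the hybrid tester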
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViTMSN model. """
import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTMSNModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMSNConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTMSNModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTMSNForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}") print("Labels: {labels}") self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) config.num_channels = 1 model = ViTMSNForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTMSNModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTMSNModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTMSNModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): torch.manual_seed(2) model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
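# The ViTMSN integration test above seeds torch before from_pretrained. A plausible reading (an
# inference from the test, not stated in the file) is that facebook/vit-msn-small ships only the
# self-supervised backbone, so the classification head is newly initialized and the hard-coded
# expected logits are only reproducible under a fixed seed. A tiny sketch of that seeding pattern
# with a plain linear head; the 384/1000 sizes mirror a small ViT backbone and ImageNet classes
# and are assumptions for illustration.
import torch
from torch import nn

torch.manual_seed(2)
head_a = nn.Linear(384, 1000)
torch.manual_seed(2)
head_b = nn.Linear(384, 1000)
assert torch.equal(head_a.weight, head_b.weight)  # same seed -> identical random initialization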
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch VitDet model. """
import unittest from transformers import VitDetConfig from transformers.testing_utils import require_torch, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import VitDetBackbone, VitDetModel class VitDetModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.num_patches_one_direction = self.image_size // self.patch_size self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VitDetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = VitDetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction), ) def create_and_check_backbone(self, config, pixel_values, labels): model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, [config.hidden_size]) config.out_features = None model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( 
list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_size]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitDetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (VitDetModel, VitDetBackbone) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": VitDetModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitDetModelTester(self) self.config_tester = ConfigTester(self, config_class=VitDetConfig, has_text_modality=False, hidden_size=37) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): super().test_cpu_offload() @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_bin(self): super().test_disk_offload() @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_safetensors(self): super().test_disk_offload() @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VitDet does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = self.model_tester.num_hidden_layers self.assertEqual(len(hidden_states), expected_num_stages + 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ self.model_tester.num_patches_one_direction, self.model_tester.num_patches_one_direction, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions model_class = 
self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) @unittest.skip(reason="VitDet does not support feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="VitDet does not have standalone checkpoints since it used as backbone in other models") def test_model_from_pretrained(self): pass @require_torch class VitDetBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (VitDetBackbone,) if is_torch_available() else () config_class = VitDetConfig has_attentions = False def setUp(self): self.model_tester = VitDetModelTester(self)
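Aside (not part of the original test file): the backbone behaviour asserted in create_and_check_backbone can be reproduced standalone. A minimal sketch, assuming the same tiny hyperparameters the tester uses (illustrative values only, not recommended settings):

import torch
from transformers import VitDetConfig, VitDetBackbone

# Tiny configuration mirroring VitDetModelTester; the sizes are illustrative assumptions.
config = VitDetConfig(
    image_size=30,
    patch_size=2,
    num_channels=3,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    out_features=None,  # with no out_features the backbone falls back to its last stage
)
model = VitDetBackbone(config)
model.eval()

pixel_values = torch.randn(1, 3, 30, 30)
with torch.no_grad():
    outputs = model(pixel_values)

# A single feature map of shape (batch_size, hidden_size, image_size // patch_size, image_size // patch_size),
# matching the assertions in create_and_check_backbone.
print(len(outputs.feature_maps), outputs.feature_maps[0].shape, model.channels)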
Tests for the VitMatte image processor. Copyright 2023 HuggingFace Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/license-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind. The call tests follow the same pattern for random numpy tensors, random PyTorch tensors and random PIL images: initialize image_processing, test non-batched input (the image processor does not support batched inputs), and verify that width and height can be divided by size_divisibility. An additional test checks that images with an arbitrary number of channels (here four) can be processed.
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=0.5, do_pad=True, size_divisibility=10, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.size_divisibility = size_divisibility self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "size_divisibility": self.size_divisibility, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VitMatteImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VitMatteImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = VitMatteImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisibility")) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, 
torch.Tensor) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.size[::-1]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_numpy_4_channels(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processor( images=image, trimaps=trimap, input_data_format="channels_first", image_mean=0, image_std=1, return_tensors="pt", ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_padding(self): image_processing = self.image_processing_class(**self.image_processor_dict) image = np.random.randn(3, 249, 491) images = image_processing.pad_image(image) assert images.shape == (3, 256, 512)
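Aside (not part of the original test file): the divisibility assertions above reduce to ceiling arithmetic. A minimal sketch of that computation, assuming the 32-pixel divisor that test_padding implies when a (3, 249, 491) image is padded to (3, 256, 512):

import math

def padded_size(height: int, width: int, size_divisibility: int = 32):
    # Round each spatial dimension up to the next multiple of size_divisibility.
    return (
        math.ceil(height / size_divisibility) * size_divisibility,
        math.ceil(width / size_divisibility) * size_divisibility,
    )

print(padded_size(249, 491))  # (256, 512), matching the expectation in test_padding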
Testing suite for the PyTorch VitMatte model. Copyright 2023 The HuggingFace Inc. team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/license-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind. Here we also overwrite some of the tests of test_modeling_common.py, as VitMatte does not use input_ids, inputs_embeds, attention_mask and seq_length. Notes from the file's comments: training is not yet supported; the gradient-checkpointing training tests are skipped because this architecture does not seem to compute gradients properly when using GC (https://github.com/huggingface/transformers/pull/27124); the hidden-states test also checks that output_hidden_states works when set via the config; the integration test prepares an image and a trimap for the model.
import unittest from huggingface_hub import hf_hub_download from transformers import VitMatteConfig from transformers.testing_utils import ( require_torch, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import VitDetConfig, VitMatteForImageMatting from transformers.models.vitmatte.modeling_vitmatte import VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=16, num_channels=4, is_training=True, use_labels=False, hidden_size=2, num_hidden_layers=2, num_attention_heads=2, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, scope=None, out_features=["stage1"], fusion_hidden_sizes=[128, 64, 32, 16], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_features = out_features self.fusion_hidden_sizes = fusion_hidden_sizes self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: raise NotImplementedError("Training is not yet supported") config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return VitDetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_size=self.hidden_size, is_training=self.is_training, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return VitMatteConfig( backbone_config=self.get_backbone_config(), hidden_size=self.hidden_size, fusion_hidden_sizes=self.fusion_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = VitMatteForImageMatting(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.alphas.shape, (self.batch_size, 1, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitMatteModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (VitMatteForImageMatting,) if is_torch_available() else () pipeline_model_mapping = {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitMatteModelTester(self) self.config_tester = ConfigTester(self, config_class=VitMatteConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() 
self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="VitMatte does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ViTMatte does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VitMatteForImageMatting.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="ViTMatte does not support retaining gradient on attention logits") def test_retain_grad_hidden_states_attentions(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [2, 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True print("Hello we're here") check_hidden_states_output(inputs_dict, config, model_class) @require_torch class VitMatteModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k").to(torch_device) filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" ) image = Image.open(filepath).convert("RGB") filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" ) trimap = Image.open(filepath).convert("L") inputs = processor(images=image, trimaps=trimap, return_tensors="pt").to(torch_device) with torch.no_grad(): alphas = model(**inputs).alphas 
expected_shape = torch.Size((1, 1, 640, 960)) self.assertEqual(alphas.shape, expected_shape) expected_slice = torch.tensor( [[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]], device=torch_device ) self.assertTrue(torch.allclose(alphas[0, 0, :3, :3], expected_slice, atol=1e-4))
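Aside (not part of the original test file): a minimal sketch of what create_and_check_model exercises, reusing the tester's tiny configuration (the sizes below are illustrative assumptions, and the fourth input channel stands in for the trimap):

import torch
from transformers import VitDetConfig, VitMatteConfig, VitMatteForImageMatting

# Tiny backbone plus matting head, mirroring VitMatteModelTester.
backbone_config = VitDetConfig(
    image_size=32,
    patch_size=16,
    num_channels=4,
    hidden_size=2,
    num_hidden_layers=2,
    num_attention_heads=2,
    out_features=["stage1"],
)
config = VitMatteConfig(
    backbone_config=backbone_config,
    hidden_size=2,
    fusion_hidden_sizes=[128, 64, 32, 16],
)
model = VitMatteForImageMatting(config)
model.eval()

pixel_values = torch.randn(1, 4, 32, 32)  # 3 image channels plus 1 trimap channel
with torch.no_grad():
    alphas = model(pixel_values).alphas

print(alphas.shape)  # (1, 1, 32, 32), matching the tester's assertion on result.alphas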
Tests for the VITS tokenizer. Copyright 2023 The HuggingFace Team; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/license-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind. Notes from the file's comments: test_save_and_load_tokenizer starts with a safety check on the model_max_length default value so we are sure the test works, and is isolated from the other tests because it saves additional tokens etc.; the un-normalized tokenizer can't handle upper case or certain punctuation (hence the <unk> tokens in the expected output); the long expected_encoding fixture is wrapped in fmt: off / fmt: on markers; the integration test loads facebook/mms-tts-eng at a fixed revision to pin the tokenizer version.
import json import os import shutil import tempfile import unittest from transformers import VitsTokenizer from transformers.models.vits.tokenization_vits import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class VitsTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = VitsTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ( "k ' z y u d h e s w – 3 c p - 1 j m i X f l o 0 b r a 4 2 n _ x v t q 5 6 g ț ţ < > | <pad> <unk>".split( " " ) ) vocab_tokens = dict(zip(vocab, range(len(vocab)))) vocab_tokens[" "] = vocab_tokens["X"] del vocab_tokens["X"] self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>"} self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) kwargs["phonemize"] = False kwargs["normalize"] = False return VitsTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5): txt = "beyonce lives in los angeles" ids = tokenizer.encode(txt, add_special_tokens=False) return txt, ids @unittest.skip("Adding multicharacter tokens does not work with the VITS tokenizer") def test_add_tokens_tokenizer(self): pass @unittest.skip("Adding multicharacter tokens does not work with the VITS tokenizer") def test_encode_decode_with_spaces(self): pass @unittest.skip("The VITS tokenizer does not support `is_split_into_words`") def test_pretokenized_inputs(self): pass def test_save_and_load_tokenizer(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) @unittest.skip("Adding multicharacter tokens does not work the VITS tokenizer") def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): pass def test_ron_normalization(self): tokenizer = self.get_tokenizer() tokenizer.language = "ron" sequences = ["vițs"] normalized_sequences = ["viţs"] encoded_ids = tokenizer(sequences, normalize=True)["input_ids"] decoded_sequences = tokenizer.batch_decode(encoded_ids) self.assertEqual(normalized_sequences, decoded_sequences) def test_normalization(self): tokenizer = self.get_tokenizer() sequences = ["VITS; is a model for t-t-s!"] normalized_sequences = ["vits is a model for t-t-s"] unnormalized_sequences = [ "<unk><unk><unk><unk><unk> is a model for t-t-s<unk>" ] encoded_normalized_ids = tokenizer(sequences, normalize=True) encoded_unnormalized_ids = tokenizer(sequences, normalize=False) decoded_normalized_sequences = [ tokenizer.decode(seq, 
skip_special_tokens=False) for seq in encoded_normalized_ids["input_ids"] ] decoded_unnormalized_sequences = [ tokenizer.decode(seq, skip_special_tokens=False) for seq in encoded_unnormalized_ids["input_ids"] ] self.assertEqual(decoded_normalized_sequences, normalized_sequences) self.assertEqual(decoded_unnormalized_sequences, unnormalized_sequences) @slow def test_tokenizer_integration(self): sequences = [ "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox! Jumps over the lazy dog...", "We use k as our padding token", ] normalized_sequences = [ "bert is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers", "the quick brown fox jumps over the lazy dog", "we use k as our padding token", ] expected_encoding = { 'input_ids': [ [0, 24, 0, 7, 0, 25, 0, 33, 0, 19, 0, 18, 0, 8, 0, 19, 0, 5, 0, 7, 0, 8, 0, 18, 0, 37, 0, 29, 0, 7, 0, 5, 0, 19, 0, 33, 0, 22, 0, 19, 0, 13, 0, 25, 0, 7, 0, 14, 0, 33, 0, 25, 0, 26, 0, 18, 0, 29, 0, 19, 0, 5, 0, 7, 0, 7, 0, 13, 0, 19, 0, 24, 0, 18, 0, 5, 0, 18, 0, 25, 0, 7, 0, 12, 0, 33, 0, 18, 0, 22, 0, 29, 0, 26, 0, 21, 0, 19, 0, 25, 0, 7, 0, 13, 0, 25, 0, 7, 0, 8, 0, 7, 0, 29, 0, 33, 0, 26, 0, 33, 0, 18, 0, 22, 0, 29, 0, 8, 0, 19, 0, 20, 0, 25, 0, 22, 0, 17, 0, 19, 0, 4, 0, 29, 0, 21, 0, 26, 0, 24, 0, 7, 0, 21, 0, 7, 0, 5, 0, 19, 0, 33, 0, 7, 0, 31, 0, 33, 0, 19, 0, 24, 0, 3, 0, 19, 0, 16, 0, 22, 0, 18, 0, 29, 0, 33, 0, 21, 0, 3, 0, 19, 0, 12, 0, 22, 0, 29, 0, 5, 0, 18, 0, 33, 0, 18, 0, 22, 0, 29, 0, 18, 0, 29, 0, 37, 0, 19, 0, 22, 0, 29, 0, 19, 0, 24, 0, 22, 0, 33, 0, 6, 0, 19, 0, 21, 0, 7, 0, 20, 0, 33, 0, 19, 0, 26, 0, 29, 0, 5, 0, 19, 0, 25, 0, 18, 0, 37, 0, 6, 0, 33, 0, 19, 0, 12, 0, 22, 0, 29, 0, 33, 0, 7, 0, 31, 0, 33, 0, 19, 0, 18, 0, 29, 0, 19, 0, 26, 0, 21, 0, 21, 0, 19, 0, 21, 0, 26, 0, 3, 0, 7, 0, 25, 0, 8, 0], [0, 33, 0, 6, 0, 7, 0, 19, 0, 34, 0, 4, 0, 18, 0, 12, 0, 0, 0, 19, 0, 24, 0, 25, 0, 22, 0, 9, 0, 29, 0, 19, 0, 20, 0, 22, 0, 31, 0, 19, 0, 16, 0, 4, 0, 17, 0, 13, 0, 8, 0, 19, 0, 22, 0, 32, 0, 7, 0, 25, 0, 19, 0, 33, 0, 6, 0, 7, 0, 19, 0, 21, 0, 26, 0, 2, 0, 3, 0, 19, 0, 5, 0, 22, 0, 37, 0, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [0, 9, 0, 7, 0, 19, 0, 4, 0, 8, 0, 7, 0, 19, 0, 0, 0, 19, 0, 26, 0, 8, 0, 19, 0, 22, 0, 4, 0, 25, 0, 19, 0, 13, 0, 26, 0, 5, 0, 5, 0, 18, 0, 29, 0, 37, 0, 19, 0, 33, 0, 22, 0, 0, 0, 7, 0, 29, 0, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } tokenizer_classes = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class) for tokenizer_class in tokenizer_classes: tokenizer = tokenizer_class.from_pretrained( "facebook/mms-tts-eng", revision="28cedf176aa99de5023a4344fd8a2cc477126fb8", pad_token="<pad>", ) encoding = tokenizer(sequences, padding=True, normalize=True) decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]] encoding_data = encoding.data self.assertDictEqual(encoding_data, expected_encoding) for expected, decoded in zip(normalized_sequences, decoded_sequences): self.assertEqual(expected, decoded)
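Aside (not part of the original test file): in the expected_encoding fixture every character id is interleaved with id 0, which appears to be a blank token inserted between characters (inferred from the fixture itself, not restated from the tokenizer source). The hypothetical helper below reproduces that pattern:

def intersperse_blank(char_ids, blank_id=0):
    # Place blank_id before every character id and once after the last one,
    # yielding the 2 * len(char_ids) + 1 layout visible in expected_encoding.
    out = [blank_id] * (2 * len(char_ids) + 1)
    out[1::2] = char_ids
    return out

print(intersperse_blank([24, 7, 25, 33]))  # [0, 24, 0, 7, 0, 25, 0, 33, 0]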
Tests for the ViViT image processor. Copyright 2022 HuggingFace Inc.; licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/license-2.0), distributed on an "AS IS" basis, without warranties or conditions of any kind. ViViT optionally rescales between -1 and 1 instead of the usual 0 and 1. The call tests follow the same pattern for random PIL videos, random numpy tensors and random PyTorch tensors: initialize image_processing, then test both non-batched and batched input.
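Aside (not part of the original test file): the offset rescaling mentioned above maps a uint8 pixel x to x * (1 / 127.5) - 1, so 0 becomes -1.0 and 255 becomes 1.0, while the plain x / 255 path lands in [0, 1]. A minimal numpy sketch of the same arithmetic that test_rescale asserts further below:

import numpy as np

image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)

offset_rescaled = image.astype(np.float32) * (1 / 127.5) - 1  # [0, 255] -> [-1, 1]
plain_rescaled = image.astype(np.float32) / 255.0             # [0, 255] -> [0, 1]

print(offset_rescaled.min(), offset_rescaled.max())  # -1.0 1.0
print(plain_rescaled.min(), plain_rescaled.max())    # 0.0 1.0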
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class VivitImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } def expected_output_image_shape(self, images): return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_channels=self.num_channels, num_frames=self.num_frames, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VivitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VivitImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = VivitImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_rescale(self): image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32) image_processor = self.image_processing_class(**self.image_processor_dict) rescaled_image = image_processor.rescale(image, scale=1 / 127.5) expected_image = (image * (1 / 
127.5)).astype(np.float32) - 1 self.assertTrue(np.allclose(rescaled_image, expected_image)) rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False) expected_image = (image / 255.0).astype(np.float32) self.assertTrue(np.allclose(rescaled_image, expected_image)) def test_call_pil(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy_4_channels(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.image_processor_tester.num_channels = 4 video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): image_processing = self.image_processing_class(**self.image_processor_dict) video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) encoded_videos = image_processing(video_inputs[0], 
return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) )
coding=utf-8. Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the PyTorch ViViT model. Notes carried over from the inline comments: num_frames is decreased to 8 because the default 32 takes too much RAM at inference; the expected sequence length adds one token for the CLS token; some tests of test_modeling_common.py are overwritten because ViViT does not use input_ids, inputs_embeds, attention_mask or seq_length; signature.parameters is an OrderedDict, so arg_names order is deterministic; output_attentions and output_hidden_states are verified both as call arguments and via the config, and the attentions are checked to always come last and in order; the classification test verifies the logits shape; the integration test runs a forward pass on a video of eating spaghetti (frame indices 164, 168, 172, 176, 181, 185, 189, 193, 198, 202, 206, 210, 215, 219, 223, 227) and verifies the logits against a slice taken from the original model.
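The tester's seq_length expression is easiest to read spelled out; here is a minimal sketch using the default values from the tester below (tubelet_size is ordered as time, height, width, and one CLS token is prepended):

# Expected token count for ViViT with the tester defaults below.
image_size, num_frames = 10, 8
tubelet_size = (2, 4, 4)  # (time, height, width)

patches_per_frame_group = (image_size // tubelet_size[2]) * (image_size // tubelet_size[1])  # 2 * 2
frame_groups = num_frames // tubelet_size[0]  # 4
seq_length = patches_per_frame_group * frame_groups + 1  # 16 + 1 = 17, the +1 is the CLS token
print(seq_length)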
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VivitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel from transformers.models.vivit.modeling_vivit import VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VivitImageProcessor class VivitModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_labels=True, num_labels=10, image_size=10, num_frames=8, tubelet_size=[2, 4, 4], num_channels=3, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.scope = scope self.seq_length = ( (self.image_size // self.tubelet_size[2]) * (self.image_size // self.tubelet_size[1]) * (self.num_frames // self.tubelet_size[0]) ) + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = VivitConfig( num_frames=self.num_frames, image_size=self.image_size, tubelet_size=self.tubelet_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = VivitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = VivitForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) 
expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VivitModelTester(self) self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Vivit does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "head_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VivitModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VivitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return VivitImageProcessor() if is_vision_available() else None @slow def test_inference_for_video_classification(self): model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.9498, 2.7971, -1.4049, 0.1024, -1.8353]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4))
coding=utf-8. Copyright 2021 HuggingFace Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the Wav2Vec2 feature extractor. Notes carried over from the inline comments: floats_list (copied from tests/models/whisper/test_feature_extraction_whisper.py) creates a random float32 tensor; non-equal-length inputs are made to increase in size; test_call checks that __call__ wraps encode_plus and batch_encode_plus using three inputs of length 800, 1000 and 1200, covering non-batched input, batched input, and batched 2-D NumPy arrays; the normalization tests check zero-mean unit-variance per utterance and that, with truncation, a max_length shorter than the longest sample pads and truncates to max_length while a max_length longer than the longest sample pads only to the longest sample; test_pretrained_checkpoints_are_set_correctly makes sure that models using group norm do not have their feature extractor return the attention_mask, since only feat_extract_norm="layer" should make use of attention_mask.
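The _check_zero_mean_unit_variance helper in the test below asserts per-utterance statistics; here is a minimal sketch of the normalization it expects (the epsilon is an assumption for illustration, not necessarily the library's exact constant):

import numpy as np

def zero_mean_unit_var(x):
    # Normalize a single utterance to zero mean and unit variance.
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)  # 1e-7 is an assumed stabilizer

speech = np.random.rand(1000).astype(np.float32)
normalized = zero_mean_unit_var(speech)
assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3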
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class Wav2Vec2FeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Wav2Vec2FeatureExtractor def setUp(self): self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, 
encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_zero_mean_unit_variance_normalization_np(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np") input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) lengths = range(800, 1400, 200) speech_inputs = [floats_list((1, x))[0] for x in lengths] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, max_length=max_length, padding=padding) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) self.assertTrue(input_values.shape == (3, 1000)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) self.assertTrue(input_values.shape == (3, 1200)) @require_torch def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = 
feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) @slow @require_torch def test_pretrained_checkpoints_are_set_correctly(self): for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: config = Wav2Vec2Config.from_pretrained(model_id) feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id) self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License (http://www.apache.org/licenses/LICENSE-2.0). Distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Testing suite for the Flax Wav2Vec2 model. Notes carried over from the inline comments: the with-LM tests use a spawn pool, which should trigger a warning if it differs from fork, and also force batch_decode to internally create a spawn pool; a user-managed pool passed together with num_processes should likewise trigger a warning; one comment notes that the speech sample is longer and "this is most likely not correctly set yet". test_forward_signature is overwritten because the model takes input_values, and signature.parameters is an OrderedDict, so arg_names order is deterministic. test_freeze_feature_encoder uses a dummy loss function (the cosine similarity of the projected and projected_quantized states), transforms the loss function to get the gradients, computes loss, outputs and gradients for the unfrozen model, compares them to the frozen model, and ensures that the outputs and losses remain precisely equal, that the gradient dicts contain the same keys, that the feature-extractor gradients are precisely zero when frozen and contain non-zero entries when unfrozen, and that the gradients of all unfrozen layers (everything except the frozen feature_extractor) remain equal. The mask-index tests mask half of the input; because of overlap, the masked counts do not have to add up exactly to mask_prob * sequence_length but must be smaller or equal, and no padding tokens may be sampled (the second half of the last input tensor is padded in the attention-mask variant). The negative-sampling tests build features in which each vector consists of the same value, reshape (B, T, C) to (B x T, C), take negative vectors from the sampled indices, and make sure that no negatively sampled vector is actually a positive one and that full vectors are sampled rather than slices of vectors, so np.unique yields a single value along the hidden_size dim. The integration tests use automatic decoding with LibriSpeech, compute the cosine similarity and retrieve the cosine similarity of the masked features, then compare to a randomly initialized model: a pretrained Wav2Vec2 model has learned to predict the quantized latent states, so the cosine similarity between quantized and predicted states is above 0.5, whereas for a random model it is very likely below 0.1.
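The pre-training check compares projected states against their quantized targets with a cosine similarity; here is a minimal NumPy stand-in for the optax.cosine_similarity call used below, illustrative only:

import numpy as np

def cosine_similarity(a, b, epsilon=1e-8):
    # Cosine similarity along the last axis, guarded against zero norms.
    dot = (a * b).sum(axis=-1)
    norms = np.linalg.norm(a, axis=-1) * np.linalg.norm(b, axis=-1)
    return dot / np.maximum(norms, epsilon)

projected = np.random.randn(2, 50, 256)
quantized = projected + 0.1 * np.random.randn(2, 50, 256)  # well aligned, so similarity is near 1
print(cosine_similarity(projected, quantized).mean())  # a pretrained model should land well above 0.5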
import inspect import math import multiprocessing import traceback import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2Config, is_flax_available from transformers.testing_utils import ( CaptureLogger, is_flaky, is_librosa_available, is_pt_flax_cross_test, is_pyctcdecode_available, require_flax, require_librosa, require_pyctcdecode, require_soundfile, run_test_in_subprocess, slow, ) from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp import optax from flax.traverse_util import flatten_dict from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor from transformers.models.wav2vec2.modeling_flax_wav2vec2 import ( FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining, FlaxWav2Vec2GumbelVectorQuantizer, FlaxWav2Vec2Model, _compute_mask_indices, _sample_negative_indices, ) if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_librosa_available(): import librosa def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(np.array(logits), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") multiprocessing.set_start_method("spawn", force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(np.array(logits)).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class FlaxWav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=24, feat_extract_norm="layer", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = 
conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = Wav2Vec2Config( do_stable_layer_norm=self.do_stable_layer_norm, hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) return config, input_values, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_values, attention_mask = config_and_inputs inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( (FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxWav2Vec2ModelTester(self) def test_train(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] model = FlaxWav2Vec2ForPreTraining(config) features_shape = ( input_values.shape[0], model._get_feat_extract_output_lengths(np.array(input_values.shape[1])), ) batch_size, sequence_length = features_shape[:2] mask_prob = 0.5 mask_length = 4 mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0)) output = model( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, train=True, dropout_rng=dropout_rng, gumbel_rng=gumbel_rng, )[0] self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "attention_mask"] 
self.assertListEqual(arg_names[:2], expected_arg_names) def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_values, attention_mask=None, **kwargs): return model(input_values=input_values, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_freeze_feature_encoder(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] model = FlaxWav2Vec2ForPreTraining(config) params = model.params def compute_loss( params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8 ): outputs = model( input_values, attention_mask=attention_mask, freeze_feature_encoder=freeze_feature_encoder, params=params, ) cosine_sim = optax.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon ) loss = cosine_sim.sum() return loss, outputs.to_tuple() grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False) (loss_frozen, outputs_frozen), grads_frozen = grad_fn( params, input_values, attention_mask, freeze_feature_encoder=True ) for output, output_frozen in zip(outputs, outputs_frozen): self.assertTrue((output == output_frozen).all()) self.assertEqual(loss, loss_frozen) grads = flatten_dict(grads) grads_frozen = flatten_dict(grads_frozen) self.assertEqual(grads.keys(), grads_frozen.keys()) feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k) feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k) for feature_extractor_grad, feature_extractor_grad_frozen in zip( feature_extractor_grads, feature_extractor_grads_frozen ): self.assertTrue((feature_extractor_grad_frozen == 0.0).all()) self.assertTrue((feature_extractor_grad > 0.0).any()) grads = tuple(grads[k] for k in grads if "feature_extractor" not in k) grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k) for grad, grad_frozen in zip(grads, grads_frozen): self.assertTrue((grad == grad_frozen).all()) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True) outputs = model(np.ones((1, 1024), dtype="f4")) self.assertIsNotNone(outputs) @is_pt_flax_cross_test @is_flaky() def test_equivalence_pt_to_flax(self): super().test_equivalence_pt_to_flax() @require_flax class FlaxWav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def 
test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_perplexity(self): probs = np.arange(100).reshape(2, 5, 10) / 100 ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) mask = np.ones((2,), dtype=bool) mask[0] = 0 ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape( sequence_length, hidden_size ) features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size)) negative_indices = _sample_negative_indices(features.shape, num_negatives) features = features.reshape(-1, hidden_size) sampled_negatives = features[negative_indices.reshape(-1)] negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose( 2, 0, 1, 3 ) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0) self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_attn_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape( sequence_length, hidden_size ) attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8) attention_mask[-1, sequence_length // 2 :] = 0 forbidden_indices = ( np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length ).tolist() features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size)) negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask) self.assertTrue(all(idx not in negative_indices for idx in forbidden_indices)) features = features.reshape(-1, hidden_size) sampled_negatives = features[negative_indices.reshape(-1)] negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose( 2, 0, 1, 3 ) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0) self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_flax @require_soundfile @slow class FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = 
load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_robust_batched(self): model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="np", padding=True) input_values = inputs.input_values attention_mask = inputs.attention_mask logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = jnp.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_pretrained(self): model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60", from_pt=True) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-large-lv60", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="np", padding=True) features_shape = ( inputs_dict["input_values"].shape[0], model._get_feat_extract_output_lengths(np.array(inputs_dict["input_values"].shape[1])), ) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) outputs = model( inputs_dict.input_values, attention_mask=inputs_dict.attention_mask, mask_time_indices=mask_time_indices, ) cosine_sim = optax.cosine_similarity( outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8 ) cosine_sim_masked = cosine_sim[mask_time_indices] config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-large-lv60") model_rand = FlaxWav2Vec2ForPreTraining(config) outputs_rand = model_rand( inputs_dict.input_values, attention_mask=inputs_dict.attention_mask, mask_time_indices=mask_time_indices, ) cosine_sim_rand = optax.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states ) cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0) @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits transcription = processor.batch_decode(np.array(logits)).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") 
@require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_pool(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000) model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="np").input_values logits = model(input_values).logits with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(np.array(logits), pool).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(np.array(logits), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None)
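The pre-training checks above (`test_freeze_feature_encoder` and `test_inference_pretrained`) score agreement between projected hidden states and their quantized targets with `optax.cosine_similarity`. As a rough, self-contained illustration of that objective (not part of the test suite; the shapes and the helper below are made up for the example), the same quantity can be computed with plain NumPy:

import numpy as np

def cosine_similarity(a, b, epsilon=1e-8):
    # similarity along the last (feature) axis, mirroring what optax.cosine_similarity returns
    dot = (a * b).sum(axis=-1)
    norms = np.linalg.norm(a, axis=-1) * np.linalg.norm(b, axis=-1)
    return dot / np.maximum(norms, epsilon)

# toy stand-ins for outputs.projected_states / outputs.projected_quantized_states
projected_states = np.random.randn(2, 10, 4)
projected_quantized_states = np.random.randn(2, 10, 4)
mask_time_indices = np.zeros((2, 10), dtype=bool)
mask_time_indices[:, ::3] = True  # pretend every third frame was masked

# the tests only inspect the similarity of the masked (predicted) frames
cosine_sim_masked = cosine_similarity(projected_states, projected_quantized_states)[mask_time_indices]
print(cosine_sim_masked.shape)  # (number of masked frames,)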
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations import copy import gc import glob import inspect import math import multiprocessing import os import tempfile import traceback import unittest import numpy as np import pytest from datasets import load_dataset from huggingface_hub import snapshot_download from transformers import Wav2Vec2Config, is_tf_available from transformers.testing_utils import ( CaptureLogger, is_flaky, is_pt_tf_cross_test, require_librosa, require_pyctcdecode, require_tf, run_test_in_subprocess, slow, ) from transformers.utils import is_librosa_available, is_pyctcdecode_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoFeatureExtractor, TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification, TFWav2Vec2Model, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_tf_wav2vec2 import _compute_mask_indices if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_librosa_available(): import librosa def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) downloaded_folder = snapshot_download("patrickvonplaten/common_voice_es_sample") file_path = glob.glob(downloaded_folder + "/*")[0] sample = librosa.load(file_path, sr=16_000)[0] model = TFWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(sample, return_tensors="tf").input_values logits = model(input_values).logits with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(logits.numpy(), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "el libro ha sido escrito por cervantes") multiprocessing.set_start_method("spawn", force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(logits.numpy()).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "el libro ha sido escrito por cervantes") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() @require_tf class TFWav2Vec2ModelTester: def __init__( self, parent, batch_size=3, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout 
self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = tf.cast(ids_tensor([self.batch_size, self.seq_length], 32768), tf.float32) / 32768.0 attention_mask = tf.ones_like(input_values) config = Wav2Vec2Config( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, do_stable_layer_norm=self.do_stable_layer_norm, ) return config, input_values, attention_mask def create_and_check_model(self, config, input_values, attention_mask): model = TFWav2Vec2Model(config) result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): config.layerdrop = 0.0 model = TFWav2Vec2Model(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask attention_mask = attention_mask * length_mask batch_outputs = model(input_values, attention_mask=attention_mask, training=False).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice, training=False).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(np.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = TFWav2Vec2ForCTC(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.wav2vec2._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) 
input_values = input_values * length_mask attention_mask = attention_mask * length_mask model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss self.parent.assertTrue(abs(labels.shape[0] * mean_loss - sum_loss) < 1e-2) def check_seq_classifier_loss(self, loss, config, input_values, *args): model = TFWav2Vec2ForSequenceClassification(config) input_values = input_values[:3] attention_mask = tf.ones(input_values.shape, dtype=tf.int32) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = tf.random.uniform((input_values.shape[0],), maxval=len(model.config.id2label), dtype=tf.int32) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 training = False masked_loss = ( model(input_values, attention_mask=attention_mask, labels=labels, training=training).loss.numpy().item() ) unmasked_loss = model(input_values, labels=labels, training=training).loss.numpy().item() assert isinstance(masked_loss, float) assert isinstance(unmasked_loss, float) assert masked_loss != unmasked_loss def check_training(self, config, input_values, *args): model = TFWav2Vec2ForCTC(config) model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.wav2vec2._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask pad_size = max(max_length_labels) - labels.shape[1] labels = tf.pad(labels, ((0, 0), (0, pad_size)), constant_values=-100) loss = model(input_values, labels=labels, training=True).loss self.parent.assertFalse(tf.math.is_inf(loss)) def check_labels_out_of_vocab(self, config, input_values, *args): model = TFWav2Vec2ForCTC(config) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.wav2vec2._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size + 500) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_tf class TFWav2Vec2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFWav2Vec2Model, TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification) if is_tf_available() else () ) pipeline_model_mapping = ( {"audio-classification": TFWav2Vec2ForSequenceClassification, "feature-extraction": TFWav2Vec2Model} if is_tf_available() else {} ) test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFWav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) 
arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @is_flaky() def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) @unittest.skip(reason="Wav2Vec2 has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Wav2Vec2 has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Wav2Vec2 has no input embeddings") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Wav2Vec2 hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): pass @unittest.skip(reason="Fix me! 
Wav2Vec2 hits OOM errors when loss is computed on full batch") def test_keras_fit(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFWav2Vec2RobustModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( (TFWav2Vec2Model, TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification) if is_tf_available() else () ) test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFWav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True, scope="robust", ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) 
outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @unittest.skip("Broke with TF 2.10") def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) @unittest.skip(reason="Wav2Vec2 has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Wav2Vec2 has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Wav2Vec2 has no input embeddings") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Wav2Vec2 hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): pass @unittest.skip(reason="Fix me! 
Wav2Vec2 hits OOM errors when loss is computed on full batch") def test_keras_fit(self): pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFWav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual( tf.reduce_sum(mask, -1).numpy().tolist(), [mask_prob * sequence_length for _ in range(batch_size)] ) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) for batch_sum in tf.reduce_sum(mask, -1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_tf @slow class TFWav2Vec2ModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_normal(self): model = TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="tf", sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = 
TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) input_values = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self") processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000) input_values = inputs.input_values attention_mask = inputs.attention_mask logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm(self): downloaded_folder = snapshot_download("patrickvonplaten/common_voice_es_sample") file_path = glob.glob(downloaded_folder + "/*")[0] sample = librosa.load(file_path, sr=16_000)[0] model = TFWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(sample, return_tensors="tf").input_values logits = model(input_values).logits transcription = processor.batch_decode(logits.numpy()).text self.assertEqual(transcription[0], "el libro ha sido escrito por cervantes") @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_pool(self): downloaded_folder = snapshot_download("patrickvonplaten/common_voice_es_sample") file_path = glob.glob(downloaded_folder + "/*")[0] sample = librosa.load(file_path, sr=16_000)[0] model = TFWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(sample, return_tensors="tf").input_values logits = model(input_values).logits with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.numpy(), pool).text self.assertEqual(transcription[0], "el libro ha sido escrito por cervantes") with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(logits.numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) 
self.assertEqual(transcription[0], "el libro ha sido escrito por cervantes") @require_pyctcdecode @require_librosa def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_keyword_spotting(self): model = TFWav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks", from_pt=True) processor = AutoFeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="tf", padding=True) input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask) predicted_logits, predicted_ids = ( tf.math.reduce_max(outputs.logits, axis=-1), tf.argmax(outputs.logits, axis=-1), ) expected_labels = [7, 6, 10, 9] expected_logits = tf.convert_to_tensor([6.1186, 11.8961, 10.2931, 6.0898]) self.assertListEqual(predicted_ids.numpy().tolist(), expected_labels) self.assertTrue(np.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_intent_classification(self): model = TFWav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic", from_pt=True) processor = AutoFeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="tf", padding=True) input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = ( tf.math.reduce_max(outputs.logits[:, :6], axis=-1), tf.argmax(outputs.logits[:, :6], axis=-1), ) predicted_logits_object, predicted_ids_object = ( tf.math.reduce_max(outputs.logits[:, 6:20], axis=-1), tf.argmax(outputs.logits[:, 6:20], axis=-1), ) predicted_logits_location, predicted_ids_location = ( tf.math.reduce_max(outputs.logits[:, 20:24], axis=-1), tf.argmax(outputs.logits[:, 20:24], axis=-1), ) expected_labels_action = [0, 0, 2, 3] expected_logits_action = tf.convert_to_tensor([0.4568, 11.0848, 1.6621, 9.3841]) expected_labels_object = [3, 10, 3, 4] expected_logits_object = tf.convert_to_tensor([1.5322, 10.7094, 5.2469, 22.1318]) expected_labels_location = [0, 0, 0, 1] expected_logits_location = tf.convert_to_tensor([1.5335, 6.5096, 10.5704, 11.0569]) self.assertListEqual(predicted_ids_action.numpy().tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.numpy().tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.numpy().tolist(), expected_labels_location) self.assertTrue(np.allclose(predicted_logits_action, expected_logits_action, atol=1e-2)) self.assertTrue(np.allclose(predicted_logits_object, expected_logits_object, atol=1e-2)) self.assertTrue(np.allclose(predicted_logits_location, expected_logits_location, atol=1e-2)) def test_inference_speaker_identification(self): model = TFWav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid", from_pt=True) processor = AutoFeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] for example in input_data["speech"]: input = processor(example, return_tensors="tf", padding=True) output = model(input.input_values, attention_mask=None) output_logits.append(output.logits[0]) output_logits = tf.stack(output_logits) predicted_logits, predicted_ids = tf.math.reduce_max(output_logits, axis=-1), 
tf.argmax(output_logits, axis=-1) expected_labels = [251, 1, 1, 3] expected_logits = tf.convert_to_tensor([37.5627, 71.6362, 64.2419, 31.7778]) self.assertListEqual(predicted_ids.numpy().tolist(), expected_labels) self.assertTrue(np.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_emotion_recognition(self): model = TFWav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er", from_pt=True) processor = AutoFeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="tf", padding=True) input_values = inputs.input_values attention_mask = inputs.attention_mask outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = ( tf.math.reduce_max(outputs.logits, axis=-1), tf.argmax(outputs.logits, axis=-1), ) expected_labels = [1, 1, 2, 2] expected_logits = tf.convert_to_tensor([2.1722, 3.0779, 8.0287, 6.6797]) self.assertListEqual(predicted_ids.numpy().tolist(), expected_labels) self.assertTrue(np.allclose(predicted_logits, expected_logits, atol=1e-2))
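Both the TF tester above and the PyTorch tester below derive the encoder sequence length from the raw audio length by chaining the feature extractor's 1-D convolutions, where each layer maps a length L to roughly floor((L - kernel) / stride) + 1. A minimal sketch of that arithmetic (not part of the test suite; the helper name and the reuse of the toy kernel/stride values are only for illustration):

def feat_extract_output_length(input_length, conv_kernel=(8, 8, 8), conv_stride=(4, 4, 4)):
    # standard 1-D convolution output length, applied once per feature-extractor layer
    for kernel, stride in zip(conv_kernel, conv_stride):
        input_length = (input_length - kernel) // stride + 1
    return input_length

# with the testers' toy settings, a 1024-sample input is downsampled to 14 encoder frames
print(feat_extract_output_length(1024))  # 14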
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Wav2Vec2 model. """

import gc
import math
import multiprocessing
import os
import pickle
import tempfile
import traceback
import unittest

import numpy as np
from datasets import load_dataset

from transformers import Wav2Vec2Config, is_torch_available
from transformers.testing_utils import (
    CaptureLogger,
    backend_empty_cache,
    is_pt_flax_cross_test,
    is_pyctcdecode_available,
    is_torchaudio_available,
    require_pyctcdecode,
    require_soundfile,
    require_torch,
    require_torchaudio,
    run_test_in_subprocess,
    slow,
    torch_device,
)
from transformers.utils import is_torch_fx_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from safetensors.torch import save_file as safe_save_file

    from transformers import (
        Wav2Vec2FeatureExtractor,
        Wav2Vec2ForAudioFrameClassification,
        Wav2Vec2ForCTC,
        Wav2Vec2ForMaskedLM,
        Wav2Vec2ForPreTraining,
        Wav2Vec2ForSequenceClassification,
        Wav2Vec2ForXVector,
        Wav2Vec2Model,
        Wav2Vec2Processor,
    )
    from transformers.models.wav2vec2.modeling_wav2vec2 import (
        WAV2VEC2_ADAPTER_PT_FILE,
        WAV2VEC2_ADAPTER_SAFE_FILE,
        Wav2Vec2GumbelVectorQuantizer,
        _compute_mask_indices,
        _sample_negative_indices,
    )

if is_torchaudio_available():
    import torchaudio

if is_pyctcdecode_available():
    import pyctcdecode.decoder

    from transformers import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm

if is_torch_fx_available():
    from transformers.utils.fx import symbolic_trace


def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout):
    error = None
    try:
        _ = in_queue.get(timeout=timeout)

        ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True)
        sample = next(iter(ds))

        resampled_audio = torchaudio.functional.resample(
            torch.tensor(sample["audio"]["array"]), 48_000, 16_000
        ).numpy()

        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to(
            torch_device
        )
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")

        input_values = processor(resampled_audio, return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values.to(torch_device)).logits

        # use a spawn pool, which should trigger a warning if different than fork
        with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool:
            transcription = processor.batch_decode(logits.cpu().numpy(), pool).text

        unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out)
        unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas")

        # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork
        multiprocessing.set_start_method("spawn", force=True)
        with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl:
            transcription = processor.batch_decode(logits.cpu().numpy()).text

        unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out)
        unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas")
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class Wav2Vec2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,  # this is most likely not correctly set yet
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        mask_time_prob=0.5,
        mask_time_length=2,
        vocab_size=32,
        do_stable_layer_norm=False,
        num_adapter_layers=1,
        adapter_stride=2,
        tdnn_dim=(32, 32),
        tdnn_kernel=(5, 3),
        tdnn_dilation=(1, 2),
        xvector_output_dim=32,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.num_adapter_layers = num_adapter_layers
        self.adapter_stride = adapter_stride
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.scope = scope
        self.tdnn_dim = tdnn_dim
        self.tdnn_kernel = tdnn_kernel
        self.tdnn_dilation = tdnn_dilation
        self.xvector_output_dim = xvector_output_dim

        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length

        self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        return config, input_values, attention_mask

    def get_config(self):
        return Wav2Vec2Config(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            mask_time_prob=self.mask_time_prob,
            mask_time_length=self.mask_time_length,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            do_stable_layer_norm=self.do_stable_layer_norm,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
            num_adapter_layers=self.num_adapter_layers,
            adapter_stride=self.adapter_stride,
            tdnn_dim=self.tdnn_dim,
            tdnn_kernel=self.tdnn_kernel,
            tdnn_dilation=self.tdnn_dilation,
            xvector_output_dim=self.xvector_output_dim,
        )

    def create_and_check_model(self, config, input_values, attention_mask):
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )
    def create_and_check_model_with_adapter(self, config, input_values, attention_mask):
        config.add_adapter = True
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size)
        )

    def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask):
        config.add_adapter = True
        config.output_hidden_size = 2 * config.hidden_size
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size)
        )

    def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask):
        config.add_adapter = True
        config.output_hidden_size = 8
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
        )

    def create_and_check_model_with_attn_adapter(self, config, input_values, attention_mask):
        config.adapter_attn_dim = 16
        model = Wav2Vec2ForCTC(config=config)

        self.parent.assertIsNotNone(model._get_adapters())

        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.output_seq_length, self.vocab_size))

    def create_and_check_batch_inference(self, config, input_values, *args):
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0

        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state

        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state

            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))

    def check_ctc_loss(self, config, input_values, *args):
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(sum_loss, float))
        self.parent.assertTrue(isinstance(mean_loss, float))

    def check_seq_classifier_loss(self, config, input_values, *args):
        model = Wav2Vec2ForSequenceClassification(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        unmasked_loss = model(input_values, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(masked_loss, float))
        self.parent.assertTrue(isinstance(unmasked_loss, float))
        self.parent.assertTrue(masked_loss != unmasked_loss)

    def check_ctc_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        model.train()

        # freeze feature encoder
        model.freeze_feature_encoder()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_seq_classifier_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForSequenceClassification(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_xvector_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForXVector(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_labels_out_of_vocab(self, config, input_values, *args):
        model = Wav2Vec2ForCTC(config)
        model.to(torch_device)
        model.train()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)

        with self.parent.assertRaises(ValueError):
            model(input_values, labels=labels)

    def prepare_config_and_inputs_for_common(self):
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "audio-classification": Wav2Vec2ForSequenceClassification,
            "automatic-speech-recognition": Wav2Vec2ForCTC,
            "feature-extraction": Wav2Vec2Model,
            "fill-mask": Wav2Vec2ForMaskedLM,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = Wav2Vec2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
createandcheckmodelwithadapterconfigandinputs def testmodelwithadapterforctcself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelwithadapterforctcconfigandinputs def testmodelwithadapterprojdimself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelwithadapterprojdimconfigandinputs def testctclossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkctclossconfigandinputs def testseqclassifierlossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifierlossconfigandinputs def testctctrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkctctrainingconfigandinputs def testseqclassifiertrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifiertrainingconfigandinputs def testxvectortrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkxvectortrainingconfigandinputs def testlabelsoutofvocabself configandinputs self modeltester prepareconfigandinputs self modeltester checklabelsoutofvocabconfigandinputs wav2vec2 has no inputsembeds def testinputsembedsself pass inputids is renamed to inputvalues def testforwardsignatureself pass wav2vec2 cannot resize token embeddings since it has no tokens embeddings def testresizetokensembeddingsself pass wav2vec2 has no inputsembeds and thus the getinputembeddings fn is not implemented def testmodelcommonattributesself pass isptflaxcrosstest nonrobust architecture does not exist in flax def testequivalenceflaxtoptself pass isptflaxcrosstest nonrobust architecture does not exist in flax def testequivalencepttoflaxself pass def testretaingradhiddenstatesattentionsself config inputsdict self modeltester prepareconfigandinputsforcommon config outputhiddenstates true config outputattentions true no need to test all models as different heads yield the same functionality modelclass self allmodelclasses0 model modelclassconfig model totorchdevice set layer drop to 0 model config layerdrop 0 0 inputvalues inputsdictinputvalues inputlengths torch tensor inputvalues shape1 for in rangeinputvalues shape0 dtypetorch long devicetorchdevice outputlengths model getfeatextractoutputlengthsinputlengths labels idstensorinputvalues shape0 outputlengths0 2 self modeltester vocabsize inputsdictattentionmask torch oneslikeinputsdictattentionmask inputsdictlabels labels outputs modelinputsdict output outputs0 encoderdecoderonly models hiddenstates outputs hiddenstates0 attentions outputs attentions0 hiddenstates retaingrad attentions retaingrad output flatten0 backwardretaingraphtrue self assertisnotnonehiddenstates grad self assertisnotnoneattentions grad def testinitializationself config inputsdict self modeltester prepareconfigandinputsforcommon configsnoinit configzeroinitconfig for modelclass in self allmodelclasses model modelclassconfigconfigsnoinit for name param in model namedparameters uniforminitparms conv weight conv parametrizations weight maskedspecembed codevectors quantizer weightproj weight projecthid weight projecthid bias projectq weight projectq bias featureprojection projection weight featureprojection projection bias objective weight if param requiresgrad if anyx in name for x in uniforminitparms self asserttrue 1 0 param data mean 1e9 round 1e9 item 1 0 msgfparameter name of model modelclass seems not properly initialized else self assertin param data mean 1e9 round 1e9 item 0 0 1 0 
msgfparameter name of model modelclass seems not properly initialized overwrite from testmodelingcommon def mockinitweightsself module if hasattrmodule weight and module weight is not none module weight data fill3 if hasattrmodule weightg and module weightg is not none module weightg data fill3 if hasattrmodule weightv and module weightv is not none module weightv data fill3 if hasattrmodule bias and module bias is not none module bias data fill3 if hasattrmodule codevectors and module codevectors is not none module codevectors data fill3 if hasattrmodule maskedspecembed and module maskedspecembed is not none module maskedspecembed data fill3 def testmaskfeatureprobctcself model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2 maskfeatureprob0 2 maskfeaturelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 def testmasktimeprobctcself model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2 masktimeprob0 2 masktimelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 unittest skipreasonfeed forward chunking is not implemented def testfeedforwardchunkingself pass slow def testmodelfrompretrainedself model wav2vec2model frompretrainedfacebookwav2vec2base960h self assertisnotnonemodel wav2vec2 cannot be torchscripted because of group norm def createandchecktorchfxtracingself config inputsdict outputlossfalse todo fix it self skiptesttorch 2 1 breaks torch fx tests for wav2vec2hubert if not istorchfxavailable or not self fxcompatible return configsnoinit configzeroinitconfig to be sure we have no nan configsnoinit returndict false for modelclass in self allmodelclasses model modelclassconfigconfigsnoinit model totorchdevice model eval inputs self prepareforclassinputsdict modelclass returnlabelsoutputloss try inputnames attentionmask bbox inputfeatures inputids inputvalues pixelvalues tokentypeids visualfeats visualpos labels inputs getlabels none startpositions inputs getstartpositions none endpositions inputs getendpositions none if labels is not none inputnames appendlabels if startpositions is not none inputnames appendstartpositions if endpositions is not none inputnames appendendpositions filteredinputs k v for k v in inputs items if k in inputnames inputnames listfilteredinputs keys modeloutput modelfilteredinputs if isinstancemodel wav2vec2forsequenceclassification and not hasattrmodel config problemtype or model config problemtype is none model config problemtype singlelabelclassification tracedmodel symbolictracemodel inputnames tracedoutput tracedmodelfilteredinputs except exception as e self failfcouldn t trace module e def flattenoutputoutput flatten for x in output if isinstancex 
tuple list flatten flattenoutputx elif not isinstancex torch tensor continue else flatten appendx return flatten modeloutput flattenoutputmodeloutput tracedoutput flattenoutputtracedoutput numoutputs lenmodeloutput for i in rangenumoutputs self asserttrue torch allclosemodeloutputi tracedoutputi ftraced ith output doesn t match model ith output for modelclass test that the model can be serialized and restored properly with tempfile temporarydirectory as tmpdirname pklfilename os path jointmpdirname model pkl try with openpklfilename wb as f pickle dumptracedmodel f with openpklfilename rb as f loaded pickle loadf except exception as e self failfcouldn t serialize deserialize the traced model e loadedoutput loadedfilteredinputs loadedoutput flattenoutputloadedoutput for i in rangenumoutputs self asserttrue torch allclosemodeloutputi loadedoutputi fserialized model ith output doesn t match model ith output for modelclass avoid memory leak without this each call increase ram usage by 20mb even with this call there are still memory leak by 0 04mb self cleartorchjitclassregistry unittest skip need to investigate why config dostablelayernorm is set to false here when it doesn t seem to be supported def testflaxfromptsafetensorsself return requiretorch class wav2vec2robustmodeltestmodeltestermixin unittest testcase allmodelclasses wav2vec2forctc wav2vec2model wav2vec2formaskedlm wav2vec2forsequenceclassification wav2vec2forpretraining wav2vec2foraudioframeclassification wav2vec2forxvector if istorchavailable else testpruning false testheadmasking false def setupself self modeltester wav2vec2modeltester self convstride3 3 3 featextractnormlayer dostablelayernormtrue self configtester configtesterself configclasswav2vec2config hiddensize37 def testconfigself self configtester runcommontests def testmodelself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelconfigandinputs def testmodelwithadapterself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelwithadapterconfigandinputs def testmodelwithadapterprojdimself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelwithadapterprojdimconfigandinputs def testmodelwithattnadapterself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelwithattnadapterconfigandinputs def testbatchedinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckbatchinferenceconfigandinputs def testctclossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkctclossconfigandinputs def testseqclassifierlossinferenceself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifierlossconfigandinputs def testctctrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkctctrainingconfigandinputs def testseqclassifiertrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkseqclassifiertrainingconfigandinputs def testxvectortrainself configandinputs self modeltester prepareconfigandinputs self modeltester checkxvectortrainingconfigandinputs def testlabelsoutofvocabself configandinputs self modeltester prepareconfigandinputs self modeltester checklabelsoutofvocabconfigandinputs wav2vec2 has no inputsembeds def testinputsembedsself pass inputids is renamed to inputvalues def testforwardsignatureself pass wav2vec2 cannot resize token embeddings since it 
has no tokens embeddings def testresizetokensembeddingsself pass wav2vec2 has no inputsembeds and thus the getinputembeddings fn is not implemented def testmodelcommonattributesself pass def testretaingradhiddenstatesattentionsself config inputsdict self modeltester prepareconfigandinputsforcommon config outputhiddenstates true config outputattentions true no need to test all models as different heads yield the same functionality modelclass self allmodelclasses0 model modelclassconfig model totorchdevice set layer drop to 0 model config layerdrop 0 0 inputvalues inputsdictinputvalues inputlengths torch tensor inputvalues shape1 for in rangeinputvalues shape0 dtypetorch long devicetorchdevice outputlengths model getfeatextractoutputlengthsinputlengths labels idstensorinputvalues shape0 outputlengths0 2 self modeltester vocabsize inputsdictattentionmask torch oneslikeinputsdictattentionmask inputsdictlabels labels outputs modelinputsdict output outputs0 encoderdecoderonly models hiddenstates outputs hiddenstates0 attentions outputs attentions0 hiddenstates retaingrad attentions retaingrad output flatten0 backwardretaingraphtrue self assertisnotnonehiddenstates grad self assertisnotnoneattentions grad def testinitializationself config inputsdict self modeltester prepareconfigandinputsforcommon configsnoinit configzeroinitconfig for modelclass in self allmodelclasses model modelclassconfigconfigsnoinit for name param in model namedparameters uniforminitparms conv weight conv parametrizations weight maskedspecembed codevectors quantizer weightproj weight projecthid weight projecthid bias projectq weight projectq bias featureprojection projection weight featureprojection projection bias objective weight if param requiresgrad if anyx in name for x in uniforminitparms self asserttrue 1 0 param data mean 1e9 round 1e9 item 1 0 msgfparameter name of model modelclass seems not properly initialized else self assertin param data mean 1e9 round 1e9 item 0 0 1 0 msgfparameter name of model modelclass seems not properly initialized overwrite from testmodelingcommon def mockinitweightsself module if hasattrmodule weight and module weight is not none module weight data fill3 if hasattrmodule weightg and module weightg is not none module weightg data fill3 if hasattrmodule weightv and module weightv is not none module weightv data fill3 if hasattrmodule bias and module bias is not none module bias data fill3 if hasattrmodule codevectors and module codevectors is not none module codevectors data fill3 if hasattrmodule maskedspecembed and module maskedspecembed is not none module maskedspecembed data fill3 def testmodelforpretrainingself config inputsdict self modeltester prepareconfigandinputsforcommon model wav2vec2forpretrainingconfig totorchdevice batchsize inputsdictinputvalues shape0 featureseqlength intmodel getfeatextractoutputlengthsinputsdictinputvalues shape1 featuresshape batchsize featureseqlength masktimeindices computemaskindices featuresshape model config masktimeprob model config masktimelength minmasks2 samplednegativeindices samplenegativeindicesfeaturesshape 10 masktimeindices masktimeindices torch fromnumpymasktimeindices totorchdevice samplednegativeindices torch fromnumpysamplednegativeindices totorchdevice loss model inputsdictinputvalues attentionmaskinputsdictattentionmask masktimeindicesmasktimeindices samplednegativeindicessamplednegativeindices loss more losses masktimeindices masktimeindices shape1 2 true samplednegativeindices samplenegativeindicesfeaturesshape 10 masktimeindices 
cpu numpy samplednegativeindices torch fromnumpysamplednegativeindices totorchdevice lossmoremasked model inputsdictinputvalues attentionmaskinputsdictattentionmask masktimeindicesmasktimeindices samplednegativeindicessamplednegativeindices loss lossmoremasked has to be bigger or equal loss since more masked inputs have to be predicted self asserttrueloss detach item lossmoremasked detach item def testmaskfeatureprobctcself model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2 maskfeatureprob0 2 maskfeaturelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 def testmasktimeprobctcself model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2 masktimeprob0 2 masktimelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue batchdurationinseconds 1 3 2 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 4 1498 32 def testmasktimefeatureprobctcsinglebatchself model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2 masktimeprob0 2 maskfeatureprob0 2 masktimelength2 maskfeaturelength2 model totorchdevice train processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue batchdurationinseconds 6 inputfeatures np random random16000 s for s in batchdurationinseconds batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits self assertequallogits shape 1 1498 32 unittest skipreasonfeed forward chunking is not implemented def testfeedforwardchunkingself pass def testloadandsetattnadapterself processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue def getlogitsmodel inputfeatures model model totorchdevice batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt with torch nograd logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits return logits inputfeatures np random random16000 s for s in 1 3 2 6 model wav2vec2forctc frompretrainedhfinternaltestingtinyrandomwav2vec2adapter targetlangit logits getlogitsmodel inputfeatures model2 wav2vec2forctc frompretrainedhfinternaltestingtinyrandomwav2vec2adapter model2 loadadapterit logits2 getlogitsmodel2 inputfeatures self asserttruetorch allcloselogits logits2 atol1e3 test that loading adapter weights with mismatched vocab sizes can be loaded def testloadtargetlangwithmismatchedsizeself processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue def getlogitsmodel inputfeatures model model totorchdevice batch processor inputfeatures paddingtrue 
samplingrateprocessor featureextractor samplingrate returntensorspt with torch nograd logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits return logits inputfeatures np random random16000 s for s in 1 3 2 6 model wav2vec2forctc frompretrained hfinternaltestingtinyrandomwav2vec2adapter targetlangfr ignoremismatchedsizestrue logits getlogitsmodel inputfeatures model2 wav2vec2forctc frompretrainedhfinternaltestingtinyrandomwav2vec2adapter model2 loadadapterfr logits2 getlogitsmodel2 inputfeatures self asserttruetorch allcloselogits logits2 atol1e3 def testloadattnadapterself processor wav2vec2processor frompretrained hfinternaltestingtinyrandomwav2vec2 returnattentionmasktrue def getlogitsmodel inputfeatures model model totorchdevice batch processor inputfeatures paddingtrue samplingrateprocessor featureextractor samplingrate returntensorspt with torch nograd logits model inputvaluesbatchinputvalues totorchdevice attentionmaskbatchattentionmask totorchdevice logits return logits inputfeatures np random random16000 s for s in 1 3 2 6 model wav2vec2forctc frompretrainedhfinternaltestingtinyrandomwav2vec2 adapterattndim16 with tempfile temporarydirectory as tempdir model savepretrainedtempdir model wav2vec2forctc frompretrainedtempdir logits getlogitsmodel inputfeatures adapterweights model getadapters save safe weights safefilepath os path jointempdir wav2vec2adaptersafefile formateng safesavefileadapterweights safefilepath metadataformat pt model loadadaptereng model loadadaptereng usesafetensorstrue with self assertraisesoserror model loadadaptereng usesafetensorsfalse with self assertraisesexception model loadadapterita usesafetensorstrue logits2 getlogitsmodel inputfeatures self asserttruetorch allcloselogits logits2 atol1e3 with tempfile temporarydirectory as tempdir model savepretrainedtempdir model wav2vec2forctc frompretrainedtempdir logits getlogitsmodel inputfeatures adapterweights model getadapters save pt weights ptfilepath os path jointempdir wav2vec2adapterptfile formateng torch saveadapterweights ptfilepath model loadadaptereng model loadadaptereng usesafetensorsfalse with self assertraisesoserror model loadadaptereng usesafetensorstrue logits2 getlogitsmodel inputfeatures self asserttruetorch allcloselogits logits2 atol1e3 model wav2vec2forctc frompretrainedhfinternaltestingtinyrandomwav2vec2adapter logits getlogitsmodel inputfeatures model loadadaptereng model loadadaptereng usesafetensorsfalse model loadadaptereng usesafetensorstrue logits2 getlogitsmodel inputfeatures self asserttruetorch allcloselogits logits2 atol1e3 slow def testmodelfrompretrainedself model wav2vec2model frompretrainedfacebookwav2vec2base960h self assertisnotnonemodel requiretorch class wav2vec2utilstestunittest testcase def testcomputemaskindicesself batchsize 4 sequencelength 60 maskprob 0 5 masklength 1 mask computemaskindicesbatchsize sequencelength maskprob masklength mask torch fromnumpymask totorchdevice self assertlistequalmask sumaxis1 tolist maskprob sequencelength for in rangebatchsize def testcomputemaskindiceslowprobself with these settings nummaskedspans0 5 which means probabilistic rounding ensures that in 5 out of 10 method calls nummaskedspans0 and in the other 5 out of 10 cases nummaskedspans1 ntrials 100 batchsize 4 sequencelength 100 maskprob 0 05 masklength 10 countdimensionsmasked 0 countdimensionsnotmasked 0 for in rangentrials mask computemaskindicesbatchsize sequencelength maskprob masklength mask torch fromnumpymask 
totorchdevice nummasks torch summask item if nummasks 0 countdimensionsmasked 1 else countdimensionsnotmasked 1 as we test for at least 10 masked dimension and at least 10 nonmasked dimension this test could fail with probability p100 coin flips at most 9 heads 1 66e18 self assertgreatercountdimensionsmasked intntrials 0 1 self assertgreatercountdimensionsnotmasked intntrials 0 1 def testcomputemaskindicesoverlapself batchsize 4 sequencelength 80 maskprob 0 5 masklength 4 mask computemaskindicesbatchsize sequencelength maskprob masklength mask torch fromnumpymask totorchdevice because of overlap mask don t have to add up exactly to maskprob sequencelength but have to be smaller or equal for batchsum in mask sumaxis1 self asserttrueintbatchsum maskprob sequencelength def testcomputemaskindicesattnmaskoverlapself batchsize 4 sequencelength 80 maskprob 0 5 masklength 4 attentionmask torch onesbatchsize sequencelength dtypetorch long devicetorchdevice attentionmask 2 sequencelength 2 0 mask computemaskindices batchsize sequencelength maskprob masklength attentionmaskattentionmask mask torch fromnumpymask totorchdevice for batchsum in mask sumaxis1 self asserttrueintbatchsum maskprob sequencelength self asserttruemask 2 sequencelength 2 sum 0 def testcomputemaskindicesshortaudioself batchsize 4 sequencelength 100 maskprob 0 05 masklength 10 attentionmask torch onesbatchsize sequencelength dtypetorch long devicetorchdevice force one example to be heavily padded attentionmask0 5 0 mask computemaskindices batchsize sequencelength maskprob masklength attentionmaskattentionmask minmasks2 make sure that nonpadded examples cannot be padded self assertfalsemask0attentionmask0 totorch bool cpu any def testcomputeperplexityself probs torch arange100 devicetorchdevice reshape2 5 10 100 ppl wav2vec2gumbelvectorquantizer computeperplexityprobs self asserttrueabsppl item 141 4291 1e3 mask half of the input mask torch ones2 devicetorchdevice dtypetorch bool mask0 0 ppl wav2vec2gumbelvectorquantizer computeperplexityprobs mask self asserttrueabsppl item 58 6757 1e3 def testsamplenegativesself batchsize 2 sequencelength 10 hiddensize 4 numnegatives 3 sequence torch div torch arangesequencelength hiddensize devicetorchdevice hiddensize roundingmodefloor features sequence viewsequencelength hiddensize each value in vector consits of same value features featuresnone expandbatchsize sequencelength hiddensize contiguous sample negative indices samplednegativeindices samplenegativeindicesbatchsize sequencelength numnegatives none samplednegativeindices torch fromnumpysamplednegativeindices totorchdevice negatives features view1 hiddensizesamplednegativeindices long view1 negatives negatives viewbatchsize sequencelength 1 hiddensize permute2 0 1 3 self asserttruenegatives shape numnegatives batchsize sequencelength hiddensize make sure no negatively sampled vector is actually a positive one for negative in negatives self asserttruenegative features 0 sum 0 0 make sure that full vectors are sampled and not values of vectors this means that unique yields a single value for hiddensize dim self assertequalnegatives uniquedim1 shape numnegatives batchsize sequencelength 1 def testsamplenegativeswithmaskself batchsize 2 sequencelength 10 hiddensize 4 numnegatives 3 second half of last input tensor is padded mask torch onesbatchsize sequencelength dtypetorch long devicetorchdevice mask1 sequencelength 2 0 sequence torch div torch arangesequencelength hiddensize devicetorchdevice hiddensize roundingmodefloor features sequence 
viewsequencelength hiddensize each value in vector consits of same value features featuresnone expandbatchsize sequencelength hiddensize contiguous replace masked feature vectors with 100 to test that those are not sampled features torch wheremask none expandfeatures shape bool features 100 sample negative indices samplednegativeindices samplenegativeindices batchsize sequencelength numnegatives mask cpu numpy samplednegativeindices torch fromnumpysamplednegativeindices totorchdevice negatives features view1 hiddensizesamplednegativeindices long view1 negatives negatives viewbatchsize sequencelength 1 hiddensize permute2 0 1 3 self asserttruenegatives 0 all item self asserttruenegatives shape numnegatives batchsize sequencelength hiddensize make sure no negatively sampled vector is actually a positive one for negative in negatives self asserttruenegative features 0 sum 0 0 make sure that full vectors are sampled and not values of vectors this means that unique yields a single value for hiddensize dim self assertequalnegatives uniquedim1 shape numnegatives batchsize sequencelength 1 requiretorch requiresoundfile slow class wav2vec2modelintegrationtestunittest testcase def teardownself super teardown cleanup as much as possible gpu memory occupied by pytorch gc collect backendemptycachetorchdevice def loaddatasamplesself numsamples ds loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation automatic decoding with librispeech speechsamples ds sortid filter lambda x xid in f1272141231000i for i in rangenumsamples numsamplesaudio return xarray for x in speechsamples def loadsuperbself task numsamples ds loaddatasetantonlsuperbdummy task splittest return ds numsamples def testinferencectcnormalself model wav2vec2forctc frompretrainedfacebookwav2vec2base960h model totorchdevice processor wav2vec2processor frompretrainedfacebookwav2vec2base960h dolowercasetrue inputspeech self loaddatasamples1 inputvalues processorinputspeech returntensorspt inputvalues totorchdevice with torch nograd logits modelinputvalues logits predictedids torch argmaxlogits dim1 predictedtrans processor batchdecodepredictedids expectedtranscriptions a man said to the universe sir i exist self assertlistequalpredictedtrans expectedtranscriptions def testinferencectcnormalbatchedself model wav2vec2forctc frompretrainedfacebookwav2vec2base960h model totorchdevice processor wav2vec2processor frompretrainedfacebookwav2vec2base960h dolowercasetrue inputspeech self loaddatasamples2 inputs processorinputspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice with torch nograd logits modelinputvalues logits predictedids torch argmaxlogits dim1 predictedtrans processor batchdecodepredictedids expectedtranscriptions a man said to the universe sir i exist sweat covered brion s body trickling into the tight lowing cloth that was the only garment he wore self assertlistequalpredictedtrans expectedtranscriptions def testinferencectcrobustbatchedself model wav2vec2forctc frompretrainedfacebookwav2vec2large960hlv60self totorchdevice processor wav2vec2processor frompretrainedfacebookwav2vec2large960hlv60self dolowercasetrue inputspeech self loaddatasamples4 inputs processorinputspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd logits modelinputvalues attentionmaskattentionmask logits predictedids torch argmaxlogits dim1 predictedtrans processor batchdecodepredictedids expectedtranscriptions a man said to the 
universe sir i exist sweat covered brion s body trickling into the tight loin cloth that was the only garment he wore the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about his instant panic was followed by a small sharp blow high on his chest self assertlistequalpredictedtrans expectedtranscriptions unittest skipiftorchdevice cpu cannot make deterministic on gpu def testinferenceintegrationself model wav2vec2forpretraining frompretrainedfacebookwav2vec2base model totorchdevice featureextractor wav2vec2featureextractor frompretrainedfacebookwav2vec2base inputspeech self loaddatasamples2 inputsdict featureextractorinputspeech returntensorspt paddingtrue batchsize inputsdictinputvalues shape0 featureseqlength intmodel getfeatextractoutputlengthsinputsdictinputvalues shape1 featuresshape batchsize featureseqlength np random seed4 masktimeindices computemaskindices featuresshape model config masktimeprob model config masktimelength minmasks2 masktimeindices torch fromnumpymasktimeindices totorchdevice with torch nograd outputs model inputsdict inputvalues totorchdevice masktimeindicesmasktimeindices compute cosine similarity cosinesim torch cosinesimilarityoutputs projectedstates outputs projectedquantizedstates dim1 retrieve cosine sim of masked features cosinesimmasked cosinesimmasktimeindices cosine similarity of model is all 0 5 as model is pretrained on contrastive loss fmt off expectedcosinesimmasked torch tensor 0 8523 0 5860 0 6905 0 5557 0 7456 0 5249 0 6639 0 7654 0 7565 0 8167 0 8222 0 7960 0 8034 0 8166 0 8310 0 8263 0 8274 0 8258 0 8179 0 8412 0 8536 0 5098 0 4728 0 6461 0 4498 0 6002 0 5774 0 6457 0 7123 0 5668 0 6866 0 4960 0 6293 0 7423 0 7419 0 7526 0 7768 0 4898 0 5393 0 8183 devicetorchdevice fmt on self asserttruetorch allclosecosinesimmasked expectedcosinesimmasked atol1e3 def testinferencepretrainedself model wav2vec2forpretraining frompretrainedfacebookwav2vec2base model totorchdevice featureextractor wav2vec2featureextractor frompretrained facebookwav2vec2base returnattentionmasktrue inputspeech self loaddatasamples2 inputsdict featureextractorinputspeech returntensorspt paddingtrue batchsize inputsdictinputvalues shape0 featureseqlength intmodel getfeatextractoutputlengthsinputsdictinputvalues shape1 featuresshape batchsize featureseqlength torch manualseed0 masktimeindices computemaskindices featuresshape model config masktimeprob model config masktimelength minmasks2 masktimeindices torch fromnumpymasktimeindices totorchdevice with torch nograd outputs model inputsdict inputvalues totorchdevice attentionmaskinputsdict attentionmask totorchdevice masktimeindicesmasktimeindices compute cosine similarity cosinesim torch cosinesimilarityoutputs projectedstates outputs projectedquantizedstates dim1 retrieve cosine sim of masked features cosinesimmasked cosinesimmasktimeindices now compare to randomly initialized model config wav2vec2config frompretrainedfacebookwav2vec2base modelrand wav2vec2forpretrainingconfig totorchdevice eval with torch nograd outputsrand modelrand inputsdict inputvalues totorchdevice attentionmaskinputsdict attentionmask totorchdevice masktimeindicesmasktimeindices compute cosine similarity cosinesimrand torch cosinesimilarity outputsrand projectedstates outputsrand projectedquantizedstates dim1 retrieve cosine sim of masked features cosinesimmaskedrand cosinesimrandmasktimeindices a pretrained wav2vec2 model has learned 
to predict the quantized latent states the cosine similarity between quantized states and predicted states 0 5 a random wav2vec2 model has not learned to predict the quantized latent states the cosine similarity between quantized states and predicted states is very likely 0 1 self asserttruecosinesimmasked mean item 5 cosinesimmaskedrand mean item 0 unittest skipiftorchdevice cpu cannot make deterministic on gpu def testlosspretrainingself model wav2vec2forpretraining frompretrained facebookwav2vec2base attentiondropout0 0 featprojdropout0 0 hiddendropout0 0 layerdrop0 0 model totorchdevice train featureextractor wav2vec2featureextractor frompretrained facebookwav2vec2base returnattentionmasktrue inputspeech self loaddatasamples2 inputsdict featureextractorinputspeech returntensorspt paddingtrue batchsize inputsdictinputvalues shape0 featureseqlength intmodel getfeatextractoutputlengthsinputsdictinputvalues shape1 featuresshape batchsize featureseqlength torch manualseed0 np random seed0 masktimeindices computemaskindices featuresshape model config masktimeprob model config masktimelength minmasks2 samplednegativeindices samplenegativeindices masktimeindices shape model config numnegatives masktimeindices masktimeindices torch fromnumpymasktimeindices totorchdevice samplednegativeindices torch fromnumpysamplednegativeindices totorchdevice with torch nograd outputs model inputsdict inputvalues totorchdevice attentionmaskinputsdict attentionmask totorchdevice masktimeindicesmasktimeindices samplednegativeindicessamplednegativeindices check diversity loss numcodevectors model config numcodevectorspergroup model config numcodevectorgroups diversityloss numcodevectors outputs codevectorperplexity numcodevectors self asserttrueabsdiversityloss item 0 9538 1e3 check overall loss contrastive loss diversity loss expectedloss 116 7094 self asserttrueabsoutputs loss item expectedloss 1e3 def testinferencekeywordspottingself model wav2vec2forsequenceclassification frompretrainedsuperbwav2vec2basesuperbks totorchdevice processor wav2vec2featureextractor frompretrainedsuperbwav2vec2basesuperbks inputdata self loadsuperbks 4 inputs processorinputdataspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd outputs modelinputvalues attentionmaskattentionmask predictedlogits predictedids torch maxoutputs logits dim1 expectedlabels 7 6 10 9 s3prl logits for the same batch expectedlogits torch tensor6 1186 11 8961 10 2931 6 0898 devicetorchdevice self assertlistequalpredictedids tolist expectedlabels self asserttruetorch allclosepredictedlogits expectedlogits atol1e2 def testinferenceintentclassificationself model wav2vec2forsequenceclassification frompretrainedsuperbwav2vec2basesuperbic totorchdevice processor wav2vec2featureextractor frompretrainedsuperbwav2vec2basesuperbic inputdata self loadsuperbic 4 inputs processorinputdataspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd outputs modelinputvalues attentionmaskattentionmask predictedlogitsaction predictedidsaction torch maxoutputs logits 6 dim1 predictedlogitsobject predictedidsobject torch maxoutputs logits 6 20 dim1 predictedlogitslocation predictedidslocation torch maxoutputs logits 20 24 dim1 expectedlabelsaction 0 0 2 3 expectedlogitsaction torch tensor0 4568 11 0848 1 6621 9 3841 devicetorchdevice expectedlabelsobject 3 10 3 4 expectedlogitsobject torch tensor1 
5322 10 7094 5 2469 22 1318 devicetorchdevice expectedlabelslocation 0 0 0 1 expectedlogitslocation torch tensor1 5335 6 5096 10 5704 11 0569 devicetorchdevice self assertlistequalpredictedidsaction tolist expectedlabelsaction self assertlistequalpredictedidsobject tolist expectedlabelsobject self assertlistequalpredictedidslocation tolist expectedlabelslocation self asserttruetorch allclosepredictedlogitsaction expectedlogitsaction atol1e2 self asserttruetorch allclosepredictedlogitsobject expectedlogitsobject atol1e2 self asserttruetorch allclosepredictedlogitslocation expectedlogitslocation atol1e2 def testinferencespeakeridentificationself model wav2vec2forsequenceclassification frompretrainedsuperbwav2vec2basesuperbsid totorchdevice processor wav2vec2featureextractor frompretrainedsuperbwav2vec2basesuperbsid inputdata self loadsuperbsi 4 outputlogits with torch nograd for example in inputdataspeech input processorexample returntensorspt paddingtrue output modelinput inputvalues totorchdevice attentionmasknone outputlogits appendoutput logits0 outputlogits torch stackoutputlogits predictedlogits predictedids torch maxoutputlogits dim1 expectedlabels 251 1 1 3 s3prl logits for the same batch expectedlogits torch tensor37 5627 71 6362 64 2419 31 7778 devicetorchdevice self assertlistequalpredictedids tolist expectedlabels self asserttruetorch allclosepredictedlogits expectedlogits atol1e2 def testinferenceemotionrecognitionself model wav2vec2forsequenceclassification frompretrainedsuperbwav2vec2basesuperber totorchdevice processor wav2vec2featureextractor frompretrainedsuperbwav2vec2basesuperber inputdata self loadsuperber 4 inputs processorinputdataspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd outputs modelinputvalues attentionmaskattentionmask predictedlogits predictedids torch maxoutputs logits dim1 expectedlabels 1 1 2 2 s3prl logits for the same batch expectedlogits torch tensor2 1722 3 0779 8 0287 6 6797 devicetorchdevice self assertlistequalpredictedids tolist expectedlabels self asserttruetorch allclosepredictedlogits expectedlogits atol1e2 def testphonemerecognitionself model wav2vec2forctc frompretrainedfacebookwav2vec2lv60espeakcvft totorchdevice processor wav2vec2processor frompretrainedfacebookwav2vec2lv60espeakcvft inputspeech self loaddatasamples4 inputs processorinputspeech returntensorspt paddingtrue inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd logits modelinputvalues attentionmaskattentionmask logits predictedids torch argmaxlogits dim1 predictedtrans processor batchdecodepredictedids expectedtranscriptions m n s d t j u n v s s a z s t s w t k v d b i n z b d i t k l n t t a t l o n k l w z o n l i m n t h i w k a t n h z t s t s t l d p b l d e k v h z o v s t e n d a z i v n s i n a n d h m w a z n d z v s p k t e z w t v l i z n t w k b a t h z n s t n t v p n k w z f l o d b a s m l p b l o h a n h z t s t should correspond to a man said to the universe sir i exist sweat covered brion s body trickling into the tight loin cloth that was the only garment he wore the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about his instant panic was followed by a small sharp blow high on his chest self assertlistequalpredictedtrans expectedtranscriptions requirepyctcdecode 
requiretorchaudio def testwav2vec2withlmself ds loaddatasetmozillafoundationcommonvoice110 es splittest streamingtrue sample nextiterds resampledaudio torchaudio functional resample torch tensorsampleaudioarray 48000 16000 numpy model wav2vec2forctc frompretrainedpatrickvonplatenwav2vec2largexlsr53spanishwithlm to torchdevice processor wav2vec2processorwithlm frompretrainedpatrickvonplatenwav2vec2largexlsr53spanishwithlm inputvalues processorresampledaudio returntensorspt inputvalues with torch nograd logits modelinputvalues totorchdevice logits transcription processor batchdecodelogits cpu numpy text self assertequaltranscription0 habitan aguas poco profundas y rocosas requirepyctcdecode requiretorchaudio def testwav2vec2withlmpoolself ds loaddatasetmozillafoundationcommonvoice110 es splittest streamingtrue sample nextiterds resampledaudio torchaudio functional resample torch tensorsampleaudioarray 48000 16000 numpy model wav2vec2forctc frompretrainedpatrickvonplatenwav2vec2largexlsr53spanishwithlm to torchdevice processor wav2vec2processorwithlm frompretrainedpatrickvonplatenwav2vec2largexlsr53spanishwithlm inputvalues processorresampledaudio returntensorspt inputvalues with torch nograd logits modelinputvalues totorchdevice logits test usermanaged pool with multiprocessing getcontextfork pool2 as pool transcription processor batchdecodelogits cpu numpy pool text self assertequaltranscription0 habitan aguas poco profundas y rocosas usermanaged pool numprocesses should trigger a warning with captureloggerprocessingwav2vec2withlm logger as cl multiprocessing getcontextfork pool 2 as pool transcription processor batchdecodelogits cpu numpy pool numprocesses2 text self assertinnumprocess cl out self assertinit will be ignored cl out self assertequaltranscription0 habitan aguas poco profundas y rocosas requirepyctcdecode requiretorchaudio def testwav2vec2withlminvalidpoolself runtestinsubprocesstestcaseself targetfunctestwav2vec2withlminvalidpool inputsnone def testinferencediarizationself model wav2vec2foraudioframeclassification frompretrainedantonlwav2vec2basesuperbsd totorchdevice processor wav2vec2featureextractor frompretrainedantonlwav2vec2basesuperbsd inputdata self loadsuperbsd 4 inputs processorinputdataspeech returntensorspt paddingtrue samplingrate16000 inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd outputs modelinputvalues attentionmaskattentionmask labels is a onehot array of shape numframes numspeakers labels outputs logits 0 long s3prl logits for the same batch expectedlogits torch tensor 5 2807 5 1272 5 4059 4 7757 5 2764 4 9621 5 0117 4 5851 1 7643 0 5462 1 7369 0 2649 1 5066 0 6200 4 5703 2 4863 0 8656 0 4783 0 8899 0 3289 0 9267 0 5781 0 7817 0 4619 4 8625 2 5316 5 2339 2 2155 4 9835 2 0344 4 4727 1 8421 devicetorchdevice self assertequallabels0 0 sum 555 self assertequallabels0 1 sum 299 todo update the tolerance after the ci moves to torch 1 10 self asserttruetorch allcloseoutputs logits 4 expectedlogits atol1e2 def testinferencespeakerverificationself model wav2vec2forxvector frompretrainedantonlwav2vec2basesuperbsv totorchdevice processor wav2vec2featureextractor frompretrainedantonlwav2vec2basesuperbsv inputdata self loadsuperbsi 4 inputs processorinputdataspeech returntensorspt paddingtrue samplingrate16000 labels torch tensor5 1 1 3 devicetorchdevice t with torch nograd inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice outputs modelinputvalues 
attentionmaskattentionmask labelslabels embeddings torch nn functional normalizeoutputs embeddings dim1 cpu cosinesim torch nn cosinesimilaritydim1 id10002 vs id10002 self assertalmostequalcosinesimembeddings1 embeddings2 numpy 0 9758 3 id10006 vs id10002 self assertalmostequalcosinesimembeddings0 embeddings1 numpy 0 7579 3 id10002 vs id10004 self assertalmostequalcosinesimembeddings2 embeddings3 numpy 0 7594 3 todo update the tolerance after the ci moves to torch 1 10 self assertalmostequaloutputs loss item 17 7963 2 requiretorchaudio def testinferencemms1ballself model wav2vec2forctc frompretrainedfacebookmms1ball totorchdevice processor wav2vec2processor frompretrainedfacebookmms1ball langmap it ita es spa fr fra en eng def runmodellang ds loaddatasetmozillafoundationcommonvoice110 lang splittest streamingtrue sample nextiterds wav2vec2lang langmaplang model loadadapterwav2vec2lang processor tokenizer settargetlangwav2vec2lang resampledaudio torchaudio functional resample torch tensorsampleaudioarray 48000 16000 numpy inputs processorresampledaudio samplingrate16000 returntensorspt inputvalues inputs inputvalues totorchdevice attentionmask inputs attentionmask totorchdevice with torch nograd outputs modelinputvalues attentionmaskattentionmask logits ids torch argmaxoutputs dim10 transcription processor decodeids return transcription transcriptions it il libro ha suscitato molte polemiche a causa dei suoi contenuti es habitan aguas poco profundas y rocosas fr ce dernier est vol tout au long de l histoire romaine en joe keton disapproved of films and buster also had reservations about the media for lang in langmap keys assert runmodellang transcriptionslang coding utf 8 2021 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch wav2vec2 model use a spawn pool which should trigger a warning if different than fork force batch_decode to internally create a spawn pool which should trigger a warning if different than fork speech is longer this is most likely not correctly set yet test does not pass for models making use of group_norm check https github com pytorch fairseq issues 3227 pad input make sure that dropout is disabled pad input make sure that dropout is disabled pad input freeze feature encoder pad input it s important that we make sure that target lengths are at least one shorter than logit lengths to prevent inf freeze everything but the classification head pad input freeze everything but the classification head pad input wav2vec2 has no inputs_embeds input_ids is renamed to input_values wav2vec2 cannot resize token embeddings since it has no tokens embeddings wav2vec2 has no inputs_embeds and thus the get_input_embeddings fn is not implemented non robust architecture does not exist in flax non robust architecture does not exist in flax no need to test all models as different heads yield the same functionality set layer drop to 0 encoder decoder only models overwrite from test_modeling_common wav2vec2 cannot be torchscripted because of group norm todo fix it to be sure we have no nan test 
that the model can be serialized and restored properly avoid memory leak without this each call increase ram usage by 20mb even with this call there are still memory leak by 0 04mb wav2vec2 has no inputs_embeds input_ids is renamed to input_values wav2vec2 cannot resize token embeddings since it has no tokens embeddings wav2vec2 has no inputs_embeds and thus the get_input_embeddings fn is not implemented no need to test all models as different heads yield the same functionality set layer drop to 0 encoder decoder only models overwrite from test_modeling_common more losses loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted test that loading adapter weights with mismatched vocab sizes can be loaded save safe weights save pt weights with these settings num_masked_spans 0 5 which means probabilistic rounding ensures that in 5 out of 10 method calls num_masked_spans 0 and in the other 5 out of 10 cases num_masked_spans 1 as we test for at least 10 masked dimension and at least 10 non masked dimension this test could fail with probability p 100 coin flips at most 9 heads 1 66e 18 because of overlap mask don t have to add up exactly to mask_prob sequence_length but have to be smaller or equal force one example to be heavily padded make sure that non padded examples cannot be padded mask half of the input each value in vector consits of same value sample negative indices make sure no negatively sampled vector is actually a positive one make sure that full vectors are sampled and not values of vectors this means that unique yields a single value for hidden_size dim second half of last input tensor is padded each value in vector consits of same value replace masked feature vectors with 100 to test that those are not sampled sample negative indices make sure no negatively sampled vector is actually a positive one make sure that full vectors are sampled and not values of vectors this means that unique yields a single value for hidden_size dim clean up as much as possible gpu memory occupied by pytorch automatic decoding with librispeech compute cosine similarity retrieve cosine sim of masked features cosine similarity of model is all 0 5 as model is pre trained on contrastive loss fmt off fmt on compute cosine similarity retrieve cosine sim of masked features now compare to randomly initialized model compute cosine similarity retrieve cosine sim of masked features a pretrained wav2vec2 model has learned to predict the quantized latent states the cosine similarity between quantized states and predicted states 0 5 a random wav2vec2 model has not learned to predict the quantized latent states the cosine similarity between quantized states and predicted states is very likely 0 1 check diversity loss check overall loss contrastive loss diversity loss s3prl logits for the same batch s3prl logits for the same batch s3prl logits for the same batch should correspond to a man said to the universe sir i exist sweat covered brion s body trickling into the tight loin cloth that was the only garment he wore the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about his instant panic was followed by a small sharp blow high on his chest test user managed pool user managed pool num_processes should trigger a warning labels is a one hot array of shape num_frames num_speakers s3prl logits for the same batch todo update the tolerance after the ci moves 
to torch 1 10 id10002 vs id10002 id10006 vs id10002 id10002 vs id10004 todo update the tolerance after the ci moves to torch 1 10
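# The suite below exercises the tiny-config model tester, the CTC / sequence-classification / x-vector heads,
# the pretraining mask and negative-sampling utilities, and the slow integration tests against pretrained checkpoints.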
import gc
import math
import multiprocessing
import os
import pickle
import tempfile
import traceback
import unittest

import numpy as np
from datasets import load_dataset

from transformers import Wav2Vec2Config, is_torch_available
from transformers.testing_utils import (
    CaptureLogger,
    backend_empty_cache,
    is_pt_flax_cross_test,
    is_pyctcdecode_available,
    is_torchaudio_available,
    require_pyctcdecode,
    require_soundfile,
    require_torch,
    require_torchaudio,
    run_test_in_subprocess,
    slow,
    torch_device,
)
from transformers.utils import is_torch_fx_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from safetensors.torch import save_file as safe_save_file

    from transformers import (
        Wav2Vec2FeatureExtractor,
        Wav2Vec2ForAudioFrameClassification,
        Wav2Vec2ForCTC,
        Wav2Vec2ForMaskedLM,
        Wav2Vec2ForPreTraining,
        Wav2Vec2ForSequenceClassification,
        Wav2Vec2ForXVector,
        Wav2Vec2Model,
        Wav2Vec2Processor,
    )
    from transformers.models.wav2vec2.modeling_wav2vec2 import (
        WAV2VEC2_ADAPTER_PT_FILE,
        WAV2VEC2_ADAPTER_SAFE_FILE,
        Wav2Vec2GumbelVectorQuantizer,
        _compute_mask_indices,
        _sample_negative_indices,
    )


if is_torchaudio_available():
    import torchaudio


if is_pyctcdecode_available():
    import pyctcdecode.decoder

    from transformers import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm


if is_torch_fx_available():
    from transformers.utils.fx import symbolic_trace


def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout):
    error = None
    try:
        _ = in_queue.get(timeout=timeout)

        ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True)
        sample = next(iter(ds))

        resampled_audio = torchaudio.functional.resample(
            torch.tensor(sample["audio"]["array"]), 48_000, 16_000
        ).numpy()

        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to(
            torch_device
        )
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")

        input_values = processor(resampled_audio, return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values.to(torch_device)).logits

        # use a spawn pool, which should trigger a warning if different than fork
        with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool:
            transcription = processor.batch_decode(logits.cpu().numpy(), pool).text

        unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out)
        unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas")

        # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork
        multiprocessing.set_start_method("spawn", force=True)
        with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl:
            transcription = processor.batch_decode(logits.cpu().numpy()).text

        unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out)
        unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas")
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class Wav2Vec2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
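        # the remaining defaults configure the positional-embedding groups, the transformer encoder,
        # time masking (mask_time_prob / mask_time_length), the adapter layers and the TDNN/x-vector head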
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        mask_time_prob=0.5,
        mask_time_length=2,
        vocab_size=32,
        do_stable_layer_norm=False,
        num_adapter_layers=1,
        adapter_stride=2,
        tdnn_dim=(32, 32),
        tdnn_kernel=(5, 3),
        tdnn_dilation=(1, 2),
        xvector_output_dim=32,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.num_adapter_layers = num_adapter_layers
        self.adapter_stride = adapter_stride
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.scope = scope
        self.tdnn_dim = tdnn_dim
        self.tdnn_kernel = tdnn_kernel
        self.tdnn_dilation = tdnn_dilation
        self.xvector_output_dim = xvector_output_dim

        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
        self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        return config, input_values, attention_mask

    def get_config(self):
        return Wav2Vec2Config(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            mask_time_prob=self.mask_time_prob,
            mask_time_length=self.mask_time_length,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            do_stable_layer_norm=self.do_stable_layer_norm,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
            num_adapter_layers=self.num_adapter_layers,
            adapter_stride=self.adapter_stride,
            tdnn_dim=self.tdnn_dim,
            tdnn_kernel=self.tdnn_kernel,
            tdnn_dilation=self.tdnn_dilation,
            xvector_output_dim=self.xvector_output_dim,
        )

    def create_and_check_model(self, config, input_values, attention_mask):
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
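        # the expected sequence length is the conv-downsampled length computed in __init__
        # (each conv layer maps length L to (L - (kernel - 1)) / stride, rounded up at the end)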
self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_with_attn_adapter(self, config, input_values, attention_mask): config.adapter_attn_dim = 16 model = Wav2Vec2ForCTC(config=config) self.parent.assertIsNotNone(model._get_adapters()) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.output_seq_length, self.vocab_size)) def create_and_check_batch_inference(self, config, input_values, *args): model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def 
check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForXVector(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ModelTest(ModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ForCTC, "feature-extraction": Wav2Vec2Model, "fill-mask": Wav2Vec2ForMaskedLM, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = 
outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = 
Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): self.skipTest("torch 2.1 breaks torch fx tests for wav2vec2/hubert.") if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) if ( isinstance(model, Wav2Vec2ForSequenceClassification) and not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) self.clear_torch_jit_class_registry() @unittest.skip( "Need to investigate why config.do_stable_layer_norm is set to False here when it doesn't seem to be supported" ) def test_flax_from_pt_safetensors(self): return @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForXVector, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): 
self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_model_with_attn_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_attn_adapter(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", 
"project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_model_for_pretraining(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = Wav2Vec2ForPreTraining(config).to(torch_device) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy()) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss_more_masked = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss self.assertTrue(loss.detach().item() <= loss_more_masked.detach().item()) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", 
mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_load_and_set_attn_adapter(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="it") logits = get_logits(model, input_features) model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") model_2.load_adapter("it") logits_2 = get_logits(model_2, input_features) self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) def test_load_target_lang_with_mismatched_size(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="fr", ignore_mismatched_sizes=True ) logits = get_logits(model, input_features) model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") model_2.load_adapter("fr") logits_2 = get_logits(model_2, input_features) self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) def 
test_load_attn_adapter(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", adapter_attn_dim=16) with tempfile.TemporaryDirectory() as tempdir: model.save_pretrained(tempdir) model = Wav2Vec2ForCTC.from_pretrained(tempdir) logits = get_logits(model, input_features) adapter_weights = model._get_adapters() safe_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_SAFE_FILE.format("eng")) safe_save_file(adapter_weights, safe_filepath, metadata={"format": "pt"}) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=True) with self.assertRaises(OSError): model.load_adapter("eng", use_safetensors=False) with self.assertRaises(Exception): model.load_adapter("ita", use_safetensors=True) logits_2 = get_logits(model, input_features) self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) with tempfile.TemporaryDirectory() as tempdir: model.save_pretrained(tempdir) model = Wav2Vec2ForCTC.from_pretrained(tempdir) logits = get_logits(model, input_features) adapter_weights = model._get_adapters() pt_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_PT_FILE.format("eng")) torch.save(adapter_weights, pt_filepath) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=False) with self.assertRaises(OSError): model.load_adapter("eng", use_safetensors=True) logits_2 = get_logits(model, input_features) self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") logits = get_logits(model, input_features) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=False) model.load_adapter("eng", use_safetensors=True) logits_2 = get_logits(model, input_features) self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class Wav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): 
batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 sequence = torch.div( torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor" ) features = sequence.view(sequence_length, hidden_size) features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 sequence = torch.div( torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor" ) features = sequence.view(sequence_length, hidden_size) features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = 
torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @require_soundfile @slow class Wav2Vec2ModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_normal(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring 
arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_inference_integration(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) np.random.seed(4) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), mask_time_indices=mask_time_indices, ) cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) cosine_sim_masked = cosine_sim[mask_time_indices] expected_cosine_sim_masked = torch.tensor([ 0.8523, 0.5860, 0.6905, 0.5557, 0.7456, 0.5249, 0.6639, 0.7654, 0.7565, 0.8167, 0.8222, 0.7960, 0.8034, 0.8166, 0.8310, 0.8263, 0.8274, 0.8258, 0.8179, 0.8412, 0.8536, 0.5098, 0.4728, 0.6461, 0.4498, 0.6002, 0.5774, 0.6457, 0.7123, 0.5668, 0.6866, 0.4960, 0.6293, 0.7423, 0.7419, 0.7526, 0.7768, 0.4898, 0.5393, 0.8183 ], device=torch_device) self.assertTrue(torch.allclose(cosine_sim_masked, expected_cosine_sim_masked, atol=1e-3)) def test_inference_pretrained(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) cosine_sim_masked = cosine_sim[mask_time_indices] config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-base") model_rand = Wav2Vec2ForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] self.assertTrue(cosine_sim_masked.mean().item() - 5 * 
cosine_sim_masked_rand.mean().item() > 0) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_loss_pretraining(self): model = Wav2Vec2ForPreTraining.from_pretrained( "facebook/wav2vec2-base", attention_dropout=0.0, feat_proj_dropout=0.0, hidden_dropout=0.0, layerdrop=0.0, ) model.to(torch_device).train() feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) np.random.seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices( mask_time_indices.shape, model.config.num_negatives, mask_time_indices ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ) num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors self.assertTrue(abs(diversity_loss.item() - 0.9538) < 1e-3) expected_loss = 116.7094 self.assertTrue(abs(outputs.loss.item() - expected_loss) < 1e-3) def test_inference_keyword_spotting(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [7, 6, 10, 9] expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_intent_classification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [0, 0, 2, 3] 
expected_logits_action = torch.tensor([0.4568, 11.0848, 1.6621, 9.3841], device=torch_device) expected_labels_object = [3, 10, 3, 4] expected_logits_object = torch.tensor([1.5322, 10.7094, 5.2469, 22.1318], device=torch_device) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor([1.5335, 6.5096, 10.5704, 11.0569], device=torch_device) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=1e-2)) self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=1e-2)) self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=1e-2)) def test_inference_speaker_identification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [251, 1, 1, 3] expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_emotion_recognition(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_phoneme_recognition(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "ɐ m æ n s ɛ d t ə ð ə j uː n ɪ v ɚ s s ɚ aɪ ɛ ɡ z ɪ s t", "s w ɛ t k ʌ v ɚ d b ɹ iː ɔ n z b ɑː d i t ɹ ɪ k l ɪ ŋ ɪ n t ə ð ə t aɪ t l oɪ n k l ɑː θ ð æ w ʌ z ð ɪ oʊ" " n l i ɡ ɑːɹ m ə n t h iː w ɔːɹ", "ð ə k aɪ t ɔ n h ɪ z tʃ ɛ s t s t ɪ 
l d ɹ ɪ p ɪ ŋ b l ʌ d ð ɪ eɪ k ʌ v h ɪ z oʊ v ɚ s t ɹ eɪ n d aɪ z iː" " v ə n ð ə s ɔːɹ ɹ ɪ ŋ ɐ ɹ iː n ɐ ɚ ɹ aʊ n d h ɪ m w ɪ ð ə θ aʊ z ə n d z ʌ v s p ɛ k t eɪ ɾ ɚ z w ɜː t ɹ" " ɪ v ɪ æ l ᵻ ɾ i z n ɑː t w ɜː θ θ ɪ ŋ k ɪ ŋ ɐ b aʊ t", "h ɪ z ɪ n s t ə n t v p æ n ɪ k w ʌ z f ɑː l oʊ d b aɪ ɐ s m ɔː l ʃ ɑːɹ p b l oʊ h aɪ ɔ n h ɪ z tʃ ɛ s t", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm(self): ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits transcription = processor.batch_decode(logits.cpu().numpy()).text self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_pool(self): ds = load_dataset("mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_diarization(self): model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) labels = (outputs.logits > 0).long() expected_logits = torch.tensor( [ [[-5.2807, -5.1272], [-5.4059, -4.7757], [-5.2764, -4.9621], [-5.0117, -4.5851]], [[-1.7643, -0.5462], [-1.7369, -0.2649], [-1.5066, -0.6200], [-4.5703, -2.4863]], [[-0.8656, -0.4783], [-0.8899, -0.3289], [-0.9267, -0.5781], 
[-0.7817, -0.4619]], [[-4.8625, -2.5316], [-5.2339, -2.2155], [-4.9835, -2.0344], [-4.4727, -1.8421]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 555) self.assertEqual(labels[0, :, 1].sum(), 299) self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1).cpu() cosine_sim = torch.nn.CosineSimilarity(dim=-1) self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).numpy(), 0.9758, 3) self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).numpy(), 0.7579, 3) self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3) self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2) @require_torchaudio def test_inference_mms_1b_all(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/mms-1b-all").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/mms-1b-all") LANG_MAP = {"it": "ita", "es": "spa", "fr": "fra", "en": "eng"} def run_model(lang): ds = load_dataset("mozilla-foundation/common_voice_11_0", lang, split="test", streaming=True) sample = next(iter(ds)) wav2vec2_lang = LANG_MAP[lang] model.load_adapter(wav2vec2_lang) processor.tokenizer.set_target_lang(wav2vec2_lang) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() inputs = processor(resampled_audio, sampling_rate=16_000, return_tensors="pt") input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) return transcription TRANSCRIPTIONS = { "it": "il libro ha suscitato molte polemiche a causa dei suoi contenuti", "es": "habitan aguas poco profundas y rocosas", "fr": "ce dernier est volé tout au long de l'histoire romaine", "en": "joe keton disapproved of films and buster also had reservations about the media", } for lang in LANG_MAP.keys(): assert run_model(lang) == TRANSCRIPTIONS[lang]
# Copyright 2021 The HuggingFace Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json import os import shutil import tempfile import unittest from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.utils import FEATURE_EXTRACTOR_NAME from .test_feature_extraction_wav2vec2 import floats_list class Wav2Vec2ProcessorTest(unittest.TestCase): def setUp(self): vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.add_kwargs_tokens_map = { "pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") def get_tokenizer(self, **kwargs_init): kwargs = self.add_kwargs_tokens_map.copy() kwargs.update(kwargs_init) return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = Wav2Vec2Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = Wav2Vec2Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = Wav2Vec2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): 
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
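# Sketch of the save/load round trip exercised by the processor tests above: build a
# Wav2Vec2Processor from its two components, save it, and reload it. The vocabulary is a
# toy example (an assumption for illustration), not the real wav2vec2 character vocab.
import json
import os
import tempfile

from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor

with tempfile.TemporaryDirectory() as tmpdir:
    vocab = {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "H": 5, "I": 6}
    vocab_file = os.path.join(tmpdir, "vocab.json")
    with open(vocab_file, "w", encoding="utf-8") as f:
        json.dump(vocab, f)

    tokenizer = Wav2Vec2CTCTokenizer(vocab_file)
    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000)
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # Saving writes both the tokenizer files and the feature extractor config; reloading
    # restores an equivalent processor.
    processor.save_pretrained(tmpdir)
    reloaded = Wav2Vec2Processor.from_pretrained(tmpdir)
    assert reloaded.tokenizer.get_vocab() == tokenizer.get_vocab()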
codingutf8 2021 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license tests for the wav2vec2 tokenizer import inspect import json import os import random import shutil import tempfile import unittest import numpy as np from transformers import wav2vec2pretrainedmodelarchivelist addedtoken wav2vec2config wav2vec2ctctokenizer wav2vec2tokenizer from transformers models wav2vec2 tokenizationwav2vec2 import vocabfilesnames wav2vec2ctctokenizeroutput from transformers testingutils import requiretorch slow from testtokenizationcommon import tokenizertestermixin globalrng random random copied from tests models whisper testfeatureextractionwhisper floatslist def floatslistshape scale1 0 rngnone namenone todopvp change to facebook todopvp change to facebook tests that all call wrap to encodeplus and batchencodeplus create three inputs of length 800 1000 and 1200 test not batched input test batched test 2d numpy arrays are batched padding should be 0 0 padding should be 0 0 padding should be 0 0 checks everything loads correctly in the same way check special tokens are set accordingly on rust and python isolate this from the other tests because we save additional tokensetc isolate this from the other tests because we save additional tokensetc default case no attentionmask is returned wav2vec2lv60 return attentionmask this test makes sure that models that are using group norm don t have their tokenizer return the attentionmask only layer feature extraction norm should make use of attentionmask check adding a single token check adding a single token fmt off fmt on fmt off fmt on fmt off heeeeelllpadlounk he llounk 1h 5e 2 3l 1pad 1l 1o 1unk fmt on check wav2vec2ctctokenizeroutput keys for char check wav2vec2ctctokenizeroutput keys for word check wav2vec2ctctokenizeroutput keys for both check that order of chars is correct and identical for both outputs check that order of words is correct and identical to both outputs check that offsets are actually correct for char 0 is h 1 is e 6 is 8 is 1st l 12 is 2nd l 13 is o 14 is unk 1 is h 6 is e 8 is 11 is 1st l note due to pad different begin of 2nd l 13 is 2nd l 14 is o 15 is unk check that offsets are actually correct for word h is at 1st position of first word first l is at 8th position of second word last e is at 6th position of first word first l is at last 15th position of second word double spaces don t get counted transform list to modeloutput fmt off fmt on we assume that decode works as expected all we will check now is the output type is correct and the output is identical to decode char word both predids correspond to the following code from transformers import autotokenizer autofeatureextractor automodelforctc from datasets import loaddataset import datasets import torch model automodelforctc frompretrainedfacebookwav2vec2base960h featureextractor autofeatureextractor frompretrainedfacebookwav2vec2base960h ds loaddatasetcommonvoice en splittrain streamingtrue ds ds castcolumnaudio datasets audiosamplingrate16000 dsiter iterds sample nextdsiter inputvalues featureextractorsampleaudioarray 
returntensorspt inputvalues logits modelinputvalues logits predids torch argmaxlogits axis1 cpu tolist fmt off wav2vec2base downsamples input audio by a factor of 320 sampling rate for wav2vec2base is 16000 fmt on let s transform offsets to time stamps in seconds note you can verify the above results by checking out the dataset viewer on https huggingface codatasetscommonvoiceviewerentrain and downloading playing the sample commonvoiceen100038 mp3 as you can hear the timestamps match more or less wav2vec2model has no max model length no testing overwrite from testtokenizationcommon we usually have added tokens from the start in tests because our vocab fixtures are smaller than the original vocabs let s not assert this self assertequalvocabsize allsize the default common tokenizer tests assumes that the output of converttokenstostring is a string which is not the case for wav2vec2 should have saved target lang as ita since it was last one coding utf 8 2021 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license tests for the wav2vec2 tokenizer copied from tests models whisper test_feature_extraction_whisper floats_list creates a random float32 tensor todo pvp change to facebook todo pvp change to facebook tests that all call wrap to encode_plus and batch_encode_plus create three inputs of length 800 1000 and 1200 test not batched input test batched test 2 d numpy arrays are batched padding should be 0 0 padding should be 0 0 padding should be 0 0 checks everything loads correctly in the same way check special tokens are set accordingly on rust and python isolate this from the other tests because we save additional tokens etc isolate this from the other tests because we save additional tokens etc default case no attention_mask is returned wav2vec2 lv60 return attention_mask this test makes sure that models that are using group norm don t have their tokenizer return the attention_mask only layer feature extraction norm should make use of attention_mask check adding a single token check adding a single token fmt off fmt on fmt off fmt on unk_token unk fmt off heeeee lll pad lo unk he llo unk 1h 5e 2 3l 1 pad 1l 1o 1 unk fmt on check wav2vec2ctctokenizeroutput keys for char check wav2vec2ctctokenizeroutput keys for word check wav2vec2ctctokenizeroutput keys for both check that order of chars is correct and identical for both outputs check that order of words is correct and identical to both outputs check that offsets are actually correct for char 0 is h 1 is e 6 is 8 is 1st l 12 is 2nd l 13 is o 14 is unk 1 is h 6 is e 8 is 11 is 1st l note due to pad different begin of 2nd l 13 is 2nd l 14 is o 15 is unk check that offsets are actually correct for word h is at 1st position of first word first l is at 8th position of second word last e is at 6th position of first word first l is at last 15th position of second word double spaces don t get counted transform list to modeloutput fmt off fmt on we assume that decode works as expected all we will check now is the output type is correct and the output is identical to decode char word both 
pred_ids correspond to the following code from transformers import autotokenizer autofeatureextractor automodelforctc from datasets import load_dataset import datasets import torch model automodelforctc from_pretrained facebook wav2vec2 base 960h feature_extractor autofeatureextractor from_pretrained facebook wav2vec2 base 960h ds load_dataset common_voice en split train streaming true ds ds cast_column audio datasets audio sampling_rate 16_000 ds_iter iter ds sample next ds_iter input_values feature_extractor sample audio array return_tensors pt input_values logits model input_values logits pred_ids torch argmax logits axis 1 cpu tolist fmt off wav2vec2 base downsamples input audio by a factor of 320 sampling rate for wav2vec2 base is 16_000 fmt on let s transform offsets to time stamps in seconds note you can verify the above results by checking out the dataset viewer on https huggingface co datasets common_voice viewer en train and downloading playing the sample common_voice_en_100038 mp3 as you can hear the time stamps match more or less wav2vec2model has no max model length no testing overwrite from test_tokenization_common we usually have added tokens from the start in tests because our vocab fixtures are smaller than the original vocabs let s not assert this self assertequal vocab_size all_size the default common tokenizer tests assumes that the output of convert_tokens_to_string is a string which is not the case for wav2vec2 should have saved target lang as ita since it was last one
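# Readable reconstruction of the snippet referenced in the note above, i.e. the code that
# produced the pred_ids used by test_offsets_integration below. This is a sketch, not part
# of the test file; the legacy "common_voice" loading script may no longer be available, in
# which case "mozilla-foundation/common_voice_11_0" (used elsewhere in these tests) is a
# drop-in replacement.
import datasets
import torch
from datasets import load_dataset
from transformers import AutoFeatureExtractor, AutoModelForCTC

model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

ds = load_dataset("common_voice", "en", split="train", streaming=True)
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
sample = next(iter(ds))

input_values = feature_extractor(sample["audio"]["array"], return_tensors="pt").input_values
logits = model(input_values).logits
pred_ids = torch.argmax(logits, axis=-1).cpu().tolist()

# wav2vec2-base downsamples its input by a factor of 320 at a sampling rate of 16_000, so
# one logit frame spans 320 / 16_000 seconds; that constant converts offsets to time stamps.
time_offset = 320 / 16_000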
import inspect import json import os import random import shutil import tempfile import unittest import numpy as np from transformers import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, AddedToken, Wav2Vec2Config, Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer, ) from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES, Wav2Vec2CTCTokenizerOutput from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class Wav2Vec2TokenizerTest(unittest.TestCase): tokenizer_class = Wav2Vec2Tokenizer def setUp(self): super().setUp() vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return Wav2Vec2Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def test_tokenizer_decode(self): tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"]) def test_tokenizer_decode_special(self): tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77], ] sample_ids_2 = [ [11, 5, 5, 5, 5, 5, 15, 15, 15, tokenizer.pad_token_id, 15, 8, 98], [ 24, 22, 5, tokenizer.pad_token_id, tokenizer.pad_token_id, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 24, 22, 5, 77, tokenizer.word_delimiter_token_id, ], ] batch_tokens = tokenizer.batch_decode(sample_ids) batch_tokens_2 = tokenizer.batch_decode(sample_ids_2) self.assertEqual(batch_tokens, batch_tokens_2) self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"]) def test_tokenizer_decode_added_tokens(self): tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) sample_ids = [ [ 11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 32, 32, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34, ], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77, tokenizer.pad_token_id, 34, 34], ] batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(batch_tokens, ["HELLO<unk>!?!?$$$", "BYE BYE<unk>$$$"]) def test_call(self): tokenizer = self.get_tokenizer() speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] encoded_sequences_1 = tokenizer(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = tokenizer(np_speech_inputs[0], 
return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = tokenizer(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = tokenizer(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = tokenizer(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = tokenizer(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_padding(self, max_length=50): def _input_values_have_equal_length(input_values): length = len(input_values[0]) for input_values_slice in input_values[1:]: if len(input_values_slice) != length: return False return True def _input_values_are_equal(input_values_1, input_values_2): if len(input_values_1) != len(input_values_2): return False for input_values_slice_1, input_values_slice_2 in zip(input_values_1, input_values_2): if not np.allclose(np.asarray(input_values_slice_1), np.asarray(input_values_slice_2), atol=1e-3): return False return True tokenizer = self.get_tokenizer() speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] input_values_1 = tokenizer(speech_inputs).input_values input_values_2 = tokenizer(speech_inputs, padding="longest").input_values input_values_3 = tokenizer(speech_inputs, padding="longest", max_length=1600).input_values self.assertFalse(_input_values_have_equal_length(input_values_1)) self.assertTrue(_input_values_have_equal_length(input_values_2)) self.assertTrue(_input_values_have_equal_length(input_values_3)) self.assertTrue(_input_values_are_equal(input_values_2, input_values_3)) self.assertTrue(len(input_values_1[0]) == 800) self.assertTrue(len(input_values_2[0]) == 1200) self.assertTrue(abs(sum(np.asarray(input_values_2[0])[800:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_2[1])[1000:])) < 1e-3) input_values_4 = tokenizer(speech_inputs, padding="max_length").input_values input_values_5 = tokenizer(speech_inputs, padding="max_length", max_length=1600).input_values self.assertTrue(_input_values_are_equal(input_values_1, input_values_4)) self.assertEqual(input_values_5.shape, (3, 1600)) self.assertTrue(abs(sum(np.asarray(input_values_5[0])[800:1200])) < 1e-3) input_values_6 = tokenizer(speech_inputs, pad_to_multiple_of=500).input_values input_values_7 = tokenizer(speech_inputs, padding="longest", pad_to_multiple_of=500).input_values input_values_8 = tokenizer( speech_inputs, padding="max_length", pad_to_multiple_of=500, max_length=2400 ).input_values self.assertTrue(_input_values_are_equal(input_values_1, input_values_6)) self.assertEqual(input_values_7.shape, (3, 1500)) self.assertEqual(input_values_8.shape, (3, 2500)) self.assertTrue(abs(sum(np.asarray(input_values_7[0])[800:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_7[1])[1000:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_7[2])[1200:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_8[0])[800:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_8[1])[1000:])) < 1e-3) self.assertTrue(abs(sum(np.asarray(input_values_8[2])[1200:])) < 1e-3) def test_save_pretrained(self): pretrained_name = 
list(self.tokenizer_class.pretrained_vocab_files_map["vocab_file"].keys())[0] tokenizer = self.tokenizer_class.from_pretrained(pretrained_name) tmpdirname2 = tempfile.mkdtemp() tokenizer_files = tokenizer.save_pretrained(tmpdirname2) self.assertSequenceEqual( sorted(tuple(VOCAB_FILES_NAMES.values()) + ("special_tokens_map.json", "added_tokens.json")), sorted(x.split(os.path.sep)[-1] for x in tokenizer_files), ) tokenizer_p = self.tokenizer_class.from_pretrained(tmpdirname2) for key in tokenizer.special_tokens_map: self.assertTrue(key in tokenizer_p.special_tokens_map) shutil.rmtree(tmpdirname2) def test_get_vocab(self): tokenizer = self.get_tokenizer() vocab_dict = tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(tokenizer), len(vocab_dict)) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) def test_save_and_load_tokenizer(self): tokenizer = self.get_tokenizer() tmpdirname = tempfile.mkdtemp() sample_ids = [0, 1, 4, 8, 9, 0, 12] before_tokens = tokenizer.decode(sample_ids) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.decode(sample_ids) after_vocab = after_tokenizer.get_vocab() self.assertEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) tokenizer = self.get_tokenizer() tmpdirname = tempfile.mkdtemp() before_len = len(tokenizer) sample_ids = [0, 1, 4, 8, 9, 0, 12, before_len, before_len + 1, before_len + 2] tokenizer.add_tokens(["?", "!"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("&") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.decode(sample_ids) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.decode(sample_ids) after_vocab = after_tokenizer.get_vocab() self.assertEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) self.assertTrue(len(tokenizer), before_len + 3) self.assertTrue(len(tokenizer), len(after_tokenizer)) shutil.rmtree(tmpdirname) def test_tokenizer_slow_store_full_signature(self): signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_zero_mean_unit_variance_normalization(self): tokenizer = self.get_tokenizer(do_normalize=True) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = tokenizer(speech_inputs, padding="longest") input_values = processed.input_values def _check_zero_mean_unit_variance(input_vector): self.assertTrue(np.abs(np.mean(input_vector)) < 1e-3) self.assertTrue(np.abs(np.var(input_vector) - 1) < 1e-3) _check_zero_mean_unit_variance(input_values[0, :800]) _check_zero_mean_unit_variance(input_values[1, :1000]) _check_zero_mean_unit_variance(input_values[2]) def test_return_attention_mask(self): speech_inputs = [floats_list((1, x))[0] for x in range(800, 
1400, 200)] tokenizer = self.get_tokenizer() processed = tokenizer(speech_inputs) self.assertNotIn("attention_mask", processed) tokenizer = self.get_tokenizer(return_attention_mask=True) processed = tokenizer(speech_inputs, padding="longest") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed.input_values.shape)) self.assertListEqual(processed.attention_mask.sum(-1).tolist(), [800, 1000, 1200]) @slow @require_torch def test_pretrained_checkpoints_are_set_correctly(self): for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: config = Wav2Vec2Config.from_pretrained(model_id) tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_id) self.assertEqual(tokenizer.return_attention_mask, config.feat_extract_norm == "layer") class Wav2Vec2CTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = Wav2Vec2CTCTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def test_tokenizer_add_token_chars(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") tokenizer.add_tokens("x") token_ids = tokenizer("C x A").input_ids self.assertEqual(token_ids, [19, 4, 32, 4, 7]) tokenizer.add_tokens(["a", "b", "c"]) token_ids = tokenizer("C a A c").input_ids self.assertEqual(token_ids, [19, 4, 33, 4, 7, 4, 35]) tokenizer.add_tokens(["a", "b", "c"]) token_ids = tokenizer("CaA c").input_ids self.assertEqual(token_ids, [19, 33, 7, 4, 35]) def test_tokenizer_add_token_words(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") tokenizer.add_tokens("xxx") token_ids = tokenizer("C xxx A B").input_ids self.assertEqual(token_ids, [19, 4, 32, 4, 7, 4, 24]) tokenizer.add_tokens(["aaa", "bbb", "ccc"]) token_ids = tokenizer("C aaa A ccc B B").input_ids self.assertEqual(token_ids, [19, 4, 33, 4, 7, 4, 35, 4, 24, 4, 24]) tokenizer.add_tokens(["aaa", "bbb", "ccc"]) token_ids = tokenizer("CaaaA ccc B B").input_ids self.assertEqual(token_ids, [19, 33, 7, 4, 35, 4, 24, 4, 24]) def test_tokenizer_decode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"]) def test_tokenizer_decode_special(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77], ] sample_ids_2 = [ [11, 5, 5, 5, 5, 5, 15, 15, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, tokenizer.pad_token_id, tokenizer.pad_token_id, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 24, 22, 
5, 77, tokenizer.word_delimiter_token_id], ] batch_tokens = tokenizer.batch_decode(sample_ids) batch_tokens_2 = tokenizer.batch_decode(sample_ids_2) self.assertEqual(batch_tokens, batch_tokens_2) self.assertEqual(batch_tokens, ["HELLO<unk>", "BYE BYE<unk>"]) def test_tokenizer_decode_added_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 32, 32, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, 24, 22, 5, 77, tokenizer.pad_token_id, 34, 34], ] batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(batch_tokens, ["HELLO<unk>!?!?$$$", "BYE BYE<unk>$$$"]) def test_special_characters_in_vocab(self): sent = "ʈʰ æ æ̃ ˧ kʰ" vocab_dict = {k: v for v, k in enumerate(set(sent.split()))} vocab_file = os.path.join(self.tmpdirname, "vocab_special.json") with open(vocab_file, "w") as f: json.dump(vocab_dict, f) tokenizer = Wav2Vec2CTCTokenizer(vocab_file) expected_sent = tokenizer.decode(tokenizer(sent).input_ids, spaces_between_special_tokens=True) self.assertEqual(sent, expected_sent) tokenizer.save_pretrained(os.path.join(self.tmpdirname, "special_tokenizer")) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(os.path.join(self.tmpdirname, "special_tokenizer")) expected_sent = tokenizer.decode(tokenizer(sent).input_ids, spaces_between_special_tokens=True) self.assertEqual(sent, expected_sent) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets(self): tokenizer = self.get_tokenizer() sample_ids = [11, 5, 5, 5, 5, 5, 4, 4, 15, 15, 15, tokenizer.pad_token_id, 15, 8, 98] outputs_char = tokenizer.decode(sample_ids, output_char_offsets=True) self.assertEqual(len(outputs_char.keys()), 2) self.assertTrue("text" in outputs_char) self.assertTrue("char_offsets" in outputs_char) self.assertTrue(isinstance(outputs_char, Wav2Vec2CTCTokenizerOutput)) outputs_word = tokenizer.decode(sample_ids, output_word_offsets=True) self.assertEqual(len(outputs_word.keys()), 2) self.assertTrue("text" in outputs_word) self.assertTrue("word_offsets" in outputs_word) self.assertTrue(isinstance(outputs_word, Wav2Vec2CTCTokenizerOutput)) outputs = tokenizer.decode(sample_ids, output_char_offsets=True, output_word_offsets=True) self.assertEqual(len(outputs.keys()), 3) self.assertTrue("text" in outputs) self.assertTrue("char_offsets" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2CTCTokenizerOutput)) self.assertEqual("".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text) self.assertEqual( self.get_from_offsets(outputs["char_offsets"], "char"), ["H", "E", " ", "L", "L", "O", "<unk>"] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "char"), self.get_from_offsets(outputs_char["char_offsets"], "char"), ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["HE", "LLO<unk>"]) self.assertListEqual( self.get_from_offsets(outputs["word_offsets"], "word"), self.get_from_offsets(outputs_word["word_offsets"], "word"), ) self.assertListEqual(self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 6, 8, 12, 13, 14]) self.assertListEqual(self.get_from_offsets(outputs["char_offsets"], 
"end_offset"), [1, 6, 8, 11, 13, 14, 15]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 8]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [6, 15]) def test_word_offsets_from_char_offsets(self): tokenizer = self.get_tokenizer() char_offsets = [ {"char": "H", "start_offset": 0, "end_offset": 1}, {"char": "I", "start_offset": 1, "end_offset": 2}, {"char": " ", "start_offset": 2, "end_offset": 3}, {"char": "L", "start_offset": 3, "end_offset": 4}, {"char": "I", "start_offset": 4, "end_offset": 5}, ] word_offsets = tokenizer._get_word_offsets(char_offsets, tokenizer.replace_word_delimiter_char) self.assertEqual( word_offsets, [{"word": "HI", "start_offset": 0, "end_offset": 2}, {"word": "LI", "start_offset": 3, "end_offset": 5}], ) char_offsets = [ {"char": " ", "start_offset": 0, "end_offset": 1}, {"char": "H", "start_offset": 1, "end_offset": 2}, {"char": "I", "start_offset": 2, "end_offset": 3}, {"char": " ", "start_offset": 3, "end_offset": 4}, {"char": " ", "start_offset": 4, "end_offset": 5}, {"char": "L", "start_offset": 5, "end_offset": 6}, {"char": "I", "start_offset": 6, "end_offset": 7}, {"char": "I", "start_offset": 7, "end_offset": 8}, {"char": " ", "start_offset": 8, "end_offset": 9}, {"char": " ", "start_offset": 9, "end_offset": 10}, ] word_offsets = tokenizer._get_word_offsets(char_offsets, tokenizer.replace_word_delimiter_char) self.assertEqual( word_offsets, [{"word": "HI", "start_offset": 1, "end_offset": 3}, {"word": "LII", "start_offset": 5, "end_offset": 8}], ) def test_offsets_batch(self): tokenizer = self.get_tokenizer() def check_list_tuples_equal(outputs_batch, outputs_list): self.assertTrue(isinstance(outputs_batch, Wav2Vec2CTCTokenizerOutput)) self.assertTrue(isinstance(outputs_list[0], Wav2Vec2CTCTokenizerOutput)) outputs_batch_2 = Wav2Vec2CTCTokenizerOutput({k: [d[k] for d in outputs_list] for k in outputs_list[0]}) self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"]) def recursive_check(list_or_dict_1, list_or_dict_2): if isinstance(list_or_dict_1, list): [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)] self.assertEqual(list_or_dict_1, list_or_dict_2) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"]) if "word_offsets" in outputs_batch: recursive_check(outputs_batch["word_offsets"], outputs_batch_2["word_offsets"]) sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True) outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_char_batch, outputs_char) outputs_word_batch = tokenizer.batch_decode(sample_ids, output_word_offsets=True) outputs_word = [tokenizer.decode(ids, output_word_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_word_batch, outputs_word) outputs_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True, output_word_offsets=True) outputs = [tokenizer.decode(ids, output_word_offsets=True, output_char_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_batch, outputs) def test_offsets_integration(self): tokenizer = 
self.tokenizer_class.from_pretrained("facebook/wav2vec2-base-960h") pred_ids = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 11, 0, 0, 0, 22, 0, 0, 4, 4, 4, 14, 0, 0, 0, 0, 0, 8, 8, 0, 5, 5, 0, 12, 0, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 10, 0, 0, 0, 15, 0, 0, 10, 0, 0, 0, 12, 0, 0, 0, 0, 0, 7, 0, 9, 0, 0, 14, 0, 0, 0, 13, 0, 7, 0, 0, 4, 4, 0, 15, 8, 8, 0, 0, 8, 0, 26, 0, 0, 4, 4, 0, 0, 15, 0, 0, 0, 0, 0, 0, 10, 0, 26, 5, 5, 0, 4, 4, 0, 0, 12, 11, 0, 0, 5, 4, 4, 4, 0, 18, 0, 0, 0, 7, 9, 9, 0, 6, 0, 12, 12, 4, 4, 0, 6, 0, 0, 8, 0, 4, 4, 4, 0, 19, 0, 0, 8, 9, 9, 0, 0, 0, 0, 12, 12, 0, 0, 0, 0, 0, 0, 0, 16, 16, 0, 0, 17, 5, 5, 5, 0, 4, 4, 4, 0, 0, 29, 29, 0, 0, 0, 0, 8, 11, 0, 9, 9, 0, 0, 0, 4, 4, 0, 12, 12, 0, 0, 0, 9, 0, 0, 0, 0, 0, 8, 18, 0, 0, 0, 4, 4, 0, 0, 8, 9, 0, 4, 4, 0, 6, 11, 5, 0, 4, 4, 0, 13, 13, 0, 0, 0, 10, 0, 0, 25, 0, 0, 6, 0, 4, 4, 0, 0, 0, 0, 7, 0, 0, 23, 0, 0, 4, 4, 0, 0, 0, 6, 11, 0, 5, 4, 4, 18, 0, 0, 0, 0, 0, 0, 7, 15, 0, 0, 0, 15, 15, 0, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] time_offset_wav2vec2_base = 320 / 16_000 expected_char_time_stamps_text = ['W', 'H', 'Y', ' ', 'D', 'O', 'E', 'S', ' ', 'M', 'I', 'L', 'I', 'S', 'A', 'N', 'D', 'R', 'A', ' ', 'L', 'O', 'O', 'K', ' ', 'L', 'I', 'K', 'E', ' ', 'S', 'H', 'E', ' ', 'W', 'A', 'N', 'T', 'S', ' ', 'T', 'O', ' ', 'C', 'O', 'N', 'S', 'U', 'M', 'E', ' ', 'J', 'O', 'H', 'N', ' ', 'S', 'N', 'O', 'W', ' ', 'O', 'N', ' ', 'T', 'H', 'E', ' ', 'R', 'I', 'V', 'T', ' ', 'A', 'P', ' ', 'T', 'H', 'E', ' ', 'W', 'A', 'L', 'L', ' '] expected_char_time_stamps_start = [1.42, 1.44, 1.52, 1.58, 1.64, 1.76, 1.82, 1.88, 1.92, 2.26, 2.32, 2.4, 2.46, 2.54, 2.66, 2.7, 2.76, 2.84, 2.88, 2.94, 3.0, 3.02, 3.1, 3.14, 3.2, 3.28, 3.42, 3.46, 3.48, 3.54, 3.62, 3.64, 3.7, 3.72, 3.8, 3.88, 3.9, 3.96, 4.0, 4.04, 4.1, 4.16, 4.2, 4.28, 4.34, 4.36, 4.48, 4.66, 4.74, 4.76, 4.84, 4.94, 5.06, 5.08, 5.12, 5.22, 5.28, 5.38, 5.5, 5.52, 5.6, 5.68, 5.7, 5.74, 5.8, 5.82, 5.84, 5.88, 5.94, 6.04, 6.1, 6.16, 6.2, 6.32, 6.38, 6.44, 6.54, 6.56, 6.6, 6.62, 6.66, 6.8, 6.82, 6.9, 6.96] expected_char_time_stamps_end = [1.44, 1.46, 1.54, 1.64, 1.66, 1.8, 1.86, 1.9, 2.06, 2.28, 2.34, 2.42, 2.48, 2.56, 2.68, 2.72, 2.78, 2.86, 2.9, 2.98, 3.02, 3.06, 3.12, 3.16, 3.24, 3.3, 3.44, 3.48, 3.52, 3.58, 3.64, 3.66, 3.72, 3.78, 3.82, 3.9, 3.94, 3.98, 4.04, 4.08, 4.12, 4.18, 4.26, 4.3, 4.36, 4.4, 4.52, 4.7, 4.76, 4.82, 4.9, 4.98, 5.08, 5.1, 5.16, 5.26, 5.32, 5.4, 5.52, 5.54, 5.64, 5.7, 5.72, 5.78, 5.82, 5.84, 5.86, 5.92, 5.98, 6.06, 6.12, 6.18, 6.24, 6.34, 6.4, 6.48, 6.56, 6.58, 6.62, 6.66, 6.68, 6.82, 6.84, 6.94, 7.02] expected_word_time_stamps_text = ['WHY', 'DOES', 'MILISANDRA', 'LOOK', 'LIKE', 'SHE', 'WANTS', 'TO', 'CONSUME', 'JOHN', 'SNOW', 'ON', 'THE', 'RIVT', 'AP', 'THE', 'WALL'] expected_word_time_stamps_start = [1.42, 1.64, 2.26, 3.0, 3.28, 3.62, 3.8, 4.1, 4.28, 4.94, 5.28, 5.68, 5.8, 5.94, 6.32, 6.54, 6.66] expected_word_time_stamps_end = [1.54, 1.9, 2.9, 3.16, 3.52, 3.72, 4.04, 4.18, 4.82, 5.16, 5.54, 5.72, 5.86, 6.18, 6.4, 6.62, 6.94] output = tokenizer.batch_decode(pred_ids, output_char_offsets=True, output_word_offsets=True) char_offsets_text = self.get_from_offsets(output["char_offsets"][0], "char") char_offsets_start = self.get_from_offsets(output["char_offsets"][0], "start_offset") char_offsets_end = 
self.get_from_offsets(output["char_offsets"][0], "end_offset") word_offsets_text = self.get_from_offsets(output["word_offsets"][0], "word") word_offsets_start = self.get_from_offsets(output["word_offsets"][0], "start_offset") word_offsets_end = self.get_from_offsets(output["word_offsets"][0], "end_offset") char_time_stamps_start = [round(c * time_offset_wav2vec2_base, 2) for c in char_offsets_start] char_time_stamps_end = [round(c * time_offset_wav2vec2_base, 2) for c in char_offsets_end] word_time_stamps_start = [round(w * time_offset_wav2vec2_base, 2) for w in word_offsets_start] word_time_stamps_end = [round(w * time_offset_wav2vec2_base, 2) for w in word_offsets_end] self.assertListEqual(expected_char_time_stamps_text, char_offsets_text) self.assertListEqual(expected_char_time_stamps_start, char_time_stamps_start) self.assertListEqual(expected_char_time_stamps_end, char_time_stamps_end) self.assertListEqual(expected_word_time_stamps_text, word_offsets_text) self.assertListEqual(expected_word_time_stamps_start, word_time_stamps_start) self.assertListEqual(expected_word_time_stamps_end, word_time_stamps_end) def test_pretrained_model_lists(self): pass def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = { "eos_token": AddedToken(">>>>|||<||<<|<<", lstrip=False, rstrip=False), "pad_token": AddedToken("<<<<<|||>|>>>>|>", rstrip=False, lstrip=False), } added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_tf_encode_plus_sent_to_model(self): pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_torch_encode_plus_sent_to_model(self): pass def test_convert_tokens_to_string_format(self): tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["T", "H", "I", "S", "|", "I", "S", "|", "A", "|", "T", "E", "X", "T"] 
output = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(output["text"], str) def test_nested_vocab(self): eng_vocab = {"a": 7, "b": 8} spa_vocab = {"a": 23, "c": 88} ita_vocab = {"a": 6, "d": 9} nested_vocab = {"eng": eng_vocab, "spa": spa_vocab, "ita": ita_vocab} def check_tokenizer(tokenizer, check_ita_first=False): if check_ita_first: self.assertEqual(tokenizer.decode([6, 9, 9]), "ad") self.assertEqual(tokenizer.encoder, ita_vocab) tokenizer.set_target_lang("eng") self.assertEqual(tokenizer.encoder, eng_vocab) self.assertEqual(tokenizer.decode([7, 8, 7]), "aba") tokenizer.set_target_lang("spa") self.assertEqual(tokenizer.decode([23, 88, 23]), "aca") self.assertEqual(tokenizer.encoder, spa_vocab) tokenizer.set_target_lang("eng") self.assertEqual(tokenizer.encoder, eng_vocab) self.assertEqual(tokenizer.decode([7, 7, 8]), "ab") tokenizer.set_target_lang("ita") self.assertEqual(tokenizer.decode([6, 9, 9]), "ad") self.assertEqual(tokenizer.encoder, ita_vocab) with tempfile.TemporaryDirectory() as tempdir: tempfile_path = os.path.join(tempdir, "vocab.json") with open(tempfile_path, "w") as temp_file: json.dump(nested_vocab, temp_file) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(tempdir, target_lang="eng") check_tokenizer(tokenizer) with tempfile.TemporaryDirectory() as tempdir: tokenizer.save_pretrained(tempdir) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(tempdir) self.assertEqual(tokenizer.target_lang, "ita") check_tokenizer(tokenizer, check_ita_first=True)
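# Hedged sketch of the nested-vocabulary behaviour checked by test_nested_vocab above: one
# vocab.json maps language codes to per-language vocabs, and set_target_lang() switches the
# active encoder. The vocab contents are the toy values from the test.
import json
import os
import tempfile

from transformers import Wav2Vec2CTCTokenizer

nested_vocab = {"eng": {"a": 7, "b": 8}, "ita": {"a": 6, "d": 9}}

with tempfile.TemporaryDirectory() as tmpdir:
    with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as f:
        json.dump(nested_vocab, f)

    tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(tmpdir, target_lang="eng")
    assert tokenizer.decode([7, 8, 7]) == "aba"

    tokenizer.set_target_lang("ita")
    assert tokenizer.decode([6, 9, 9]) == "ad"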
codingutf8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch wav2vec2conformer model import math import tempfile import unittest import numpy as np from datasets import loaddataset from transformers import wav2vec2conformerconfig istorchavailable from transformers testingutils import isptflaxcrosstest requiretorch requiretorchaccelerator requiretorchfp16 slow torchdevice from testconfigurationcommon import configtester from testmodelingcommon import modeltestermixin configzeroinit floatstensor idstensor randomattentionmask from testpipelinemixin import pipelinetestermixin if istorchavailable import torch from transformers import wav2vec2conformerforaudioframeclassification wav2vec2conformerforctc wav2vec2conformerforpretraining wav2vec2conformerforsequenceclassification wav2vec2conformerforxvector wav2vec2conformermodel wav2vec2featureextractor wav2vec2processor from transformers models wav2vec2conformer modelingwav2vec2conformer import wav2vec2conformergumbelvectorquantizer computemaskindices samplenegativeindices class wav2vec2conformermodeltester def init self parent batchsize13 seqlength1024 speech is longer istrainingfalse hiddensize16 featextractnormgroup featextractdropout0 0 featextractactivationgelu convdim32 32 32 convstride4 4 4 convkernel8 8 8 convbiasfalse numconvposembeddings16 numconvposembeddinggroups2 numhiddenlayers2 numattentionheads2 hiddendropoutprob0 1 intermediatesize20 layernormeps1e5 hiddenactgelu initializerrange0 02 masktimeprob0 5 masktimelength2 vocabsize32 dostablelayernormfalse numadapterlayers1 adapterstride2 tdnndim32 32 tdnnkernel5 3 tdnndilation1 2 xvectoroutputdim32 positionembeddingstyperelative scopenone self parent parent self batchsize batchsize self seqlength seqlength self istraining istraining self hiddensize hiddensize self featextractnorm featextractnorm self featextractdropout featextractdropout self featextractactivation featextractactivation self convdim convdim self convstride convstride self convkernel convkernel self convbias convbias self numconvposembeddings numconvposembeddings self numconvposembeddinggroups numconvposembeddinggroups self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self hiddendropoutprob hiddendropoutprob self intermediatesize intermediatesize self layernormeps layernormeps self hiddenact hiddenact self initializerrange initializerrange self vocabsize vocabsize self dostablelayernorm dostablelayernorm self numadapterlayers numadapterlayers self adapterstride adapterstride self masktimeprob masktimeprob self masktimelength masktimelength self scope scope self tdnndim tdnndim self tdnnkernel tdnnkernel self tdnndilation tdnndilation self xvectoroutputdim xvectoroutputdim self positionembeddingstype positionembeddingstype outputseqlength self seqlength for kernel stride in zipself convkernel self convstride outputseqlength outputseqlength kernel 1 stride self outputseqlength intmath ceiloutputseqlength self encoderseqlength self outputseqlength self adapteroutputseqlength self outputseqlength 1 
adapterstride 1 def prepareconfigandinputsself positionembeddingstyperelative inputvalues floatstensorself batchsize self seqlength self vocabsize attentionmask randomattentionmaskself batchsize self seqlength config self getconfigpositionembeddingstypepositionembeddingstype return config inputvalues attentionmask def getconfigself positionembeddingstyperelative return wav2vec2conformerconfig hiddensizeself hiddensize featextractnormself featextractnorm featextractdropoutself featextractdropout featextractactivationself featextractactivation convdimself convdim convstrideself convstride convkernelself convkernel convbiasself convbias masktimeprobself masktimeprob masktimelengthself masktimelength numconvposembeddingsself numconvposembeddings numconvposembeddinggroupsself numconvposembeddinggroups numhiddenlayersself numhiddenlayers numattentionheadsself numattentionheads hiddendropoutprobself hiddendropoutprob intermediatesizeself intermediatesize layernormepsself layernormeps dostablelayernormself dostablelayernorm hiddenactself hiddenact initializerrangeself initializerrange vocabsizeself vocabsize numadapterlayersself numadapterlayers adapterstrideself adapterstride tdnndimself tdnndim tdnnkernelself tdnnkernel tdnndilationself tdnndilation xvectoroutputdimself xvectoroutputdim positionembeddingstypepositionembeddingstype def createandcheckmodelself config inputvalues attentionmask model wav2vec2conformermodelconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self outputseqlength self hiddensize def createandcheckmodelwithadapterself config inputvalues attentionmask config addadapter true model wav2vec2conformermodelconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self adapteroutputseqlength self hiddensize def createandcheckmodelwithadapterforctcself config inputvalues attentionmask config addadapter true config outputhiddensize 2 config hiddensize model wav2vec2conformerforctcconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result logits shape self batchsize self adapteroutputseqlength self vocabsize def createandcheckmodelwithadapterprojdimself config inputvalues attentionmask config addadapter true config outputhiddensize 8 model wav2vec2conformermodelconfigconfig model totorchdevice model eval result modelinputvalues attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self adapteroutputseqlength config outputhiddensize def createandcheckmodelfloat16self config inputvalues attentionmask model wav2vec2conformermodelconfigconfig with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname model wav2vec2conformermodel frompretrainedtmpdirname torchdtypetorch float16 model totorchdevice model eval with torch nograd result modelinputvalues typedtypetorch float16 attentionmaskattentionmask self parent assertequal result lasthiddenstate shape self batchsize self outputseqlength self hiddensize def createandcheckbatchinferenceself config inputvalues args test does not pass for models making use of groupnorm check https github compytorchfairseqissues3227 model wav2vec2conformermodelconfigconfig model totorchdevice model eval inputvalues inputvalues 3 attentionmask torch onesinputvalues shape devicetorchdevice dtypetorch bool inputlengths 
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Wav2Vec2-Conformer model. """
import math import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2ConformerConfig, is_torch_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( Wav2Vec2ConformerGumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) class Wav2Vec2ConformerModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, position_embeddings_type="relative", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.position_embeddings_type = position_embeddings_type output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self, position_embeddings_type="relative"): input_values = floats_tensor([self.batch_size, 
self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(position_embeddings_type=position_embeddings_type) return config, input_values, attention_mask def get_config(self, position_embeddings_type="relative"): return Wav2Vec2ConformerConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, position_embeddings_type=position_embeddings_type, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_float16(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = Wav2Vec2ConformerModel.from_pretrained(tmpdirname, torch_dtype=torch.float16) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_values.type(dtype=torch.float16), attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) 
def create_and_check_batch_inference(self, config, input_values, *args): model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True 
model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForXVector(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ConformerForCTC, Wav2Vec2ConformerModel, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ConformerForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ConformerForCTC, "feature-extraction": Wav2Vec2ConformerModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2ConformerModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2ConformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_no_rel_pos(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def test_model_common_attributes(self): pass @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = 
model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None: module.pos_bias_u.data.fill_(3) if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None: module.pos_bias_v.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ConformerForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ConformerForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2ConformerModel.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") self.assertIsNotNone(model) @require_torch class 
Wav2Vec2ConformerUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, 
batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @slow class Wav2Vec2ConformerModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]) speech_samples = speech_samples[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_normal_batched_rel_pos(self): model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loincloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched_rope(self): model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rope-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rope-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] 
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_pretrained(self): model = Wav2Vec2ConformerForPreTraining.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) cosine_sim_masked = cosine_sim[mask_time_indices] config = Wav2Vec2ConformerConfig.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") model_rand = Wav2Vec2ConformerForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
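As a quick usage reference for the checkpoints exercised by the integration tests above, a minimal CTC inference sketch might look as follows. It reuses the checkpoint and dummy LibriSpeech split named in those tests, assumes network access to download both, and is illustrative rather than part of the test suite.

import torch
from datasets import load_dataset
from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

# Checkpoint and dataset names are taken from the integration tests above.
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft")
model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft")
model.eval()

# Load one 16 kHz waveform from the dummy LibriSpeech validation split.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech = ds[0]["audio"]["array"]

# Feature-extract the raw waveform, run the model, and decode greedily.
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)[0]
print(transcription)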
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for the Wav2Vec2Phoneme tokenizer. """
import json import os import unittest from typing import Tuple from transformers import Wav2Vec2PhonemeCTCTokenizer from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = Wav2Vec2PhonemeCTCTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" ") vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks toks_ids = [t[0] for t in toks] output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def test_tokenizer_add_new_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") 
tokenizer.add_tokens("xxx") token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 392, 17]) tokenizer.add_tokens(["aaa", "bbb", "ccc"]) token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids self.assertEqual(token_ids, [13, 393, 17, 395]) token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids self.assertEqual(token_ids, [3, 200]) def test_phonemize(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː") def test_encode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_encode_decode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids) self.assertEqual(phonemes, phonemes_enc_dec) def test_decode(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) def test_phonemize_with_word_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |") def test_encode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids) def test_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] tokens = tokenizer.decode(sample_ids[0]) batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False) batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False) self.assertEqual(tokens, batch_tokens[0]) self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"]) def test_encode_decode_with_del(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = 
tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False) self.assertEqual(phonemes, phonemes_enc_dec) def test_encode_decode_with_del_filter(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" ) tokenizer.add_tokens("|") input_text = "Hello how are you" phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us") phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec) def test_change_phonemizer_lang(self): tokenizer = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None ) input_text = "Hello how are you" input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids self.assertNotEqual(input_ids_en, input_ids_fr) text_en = tokenizer.decode(input_ids_en) text_fr = tokenizer.decode(input_ids_fr) self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː") self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u") def test_case_insensitive(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_text_up = "Hello how Are you" input_text_low = "hello how are you" input_ids_up = tokenizer(input_text_up).input_ids input_ids_low = tokenizer(input_text_low).input_ids self.assertEqual(input_ids_up, input_ids_low) def test_tokenizer_decode_added_tokens(self): tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] batch_tokens = tokenizer.batch_decode(sample_ids) self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? 
$$$", "j ð s j ð s oːɹ $$$"]) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") tokenizer.add_tokens("|") sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False) self.assertEqual(len(outputs.keys()), 2) self.assertTrue("text" in outputs) self.assertTrue("char_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput)) self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def test_offsets_batch(self): tokenizer = self.get_tokenizer(word_delimiter_token="|") def check_list_tuples_equal(outputs_batch, outputs_list): self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput)) self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput)) outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"]) def recursive_check(list_or_dict_1, list_or_dict_2): if isinstance(list_or_dict_1, list): [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)] self.assertEqual(list_or_dict_1, list_or_dict_2) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"]) sample_ids = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True) outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids] check_list_tuples_equal(outputs_char_batch, outputs_char) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes") def test_added_tokens_do_lower_case(self): pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes") def test_encode_decode_with_spaces(self): pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency") def test_internal_consistency(self): pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing") def test_pretrained_model_lists(self): pass def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) 
self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_tf_encode_plus_sent_to_model(self): pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def test_torch_encode_plus_sent_to_model(self): pass def test_convert_tokens_to_string_format(self): tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] output = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(output["text"], str)
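# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the round trip
# that the tests above exercise. The checkpoint name is the one already used
# in these tests; running this requires network access and the `phonemizer`
# backend (espeak). The helper name is only for the example.
# ---------------------------------------------------------------------------
def _example_phoneme_tokenizer_round_trip():
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

    # Text is phonemized first, then mapped to ids ...
    phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
    input_ids = tokenizer(phonemes, do_phonemize=False).input_ids

    # ... and decoding returns phonemes, not the original text, which is why
    # encode/decode is not internally consistent for this tokenizer.
    decoded = tokenizer.decode(input_ids)
    return phonemes, decoded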
# Copyright 2021 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput if is_torch_available(): from transformers import Wav2Vec2ForCTC @require_pyctcdecode class Wav2Vec2ProcessorWithLMTest(unittest.TestCase): def setUp(self): vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.add_kwargs_tokens_map = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder" def get_tokenizer(self, **kwargs_init): kwargs = self.add_kwargs_tokens_map.copy() kwargs.update(kwargs_init) return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def get_decoder(self, **kwargs): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) processor.save_pretrained(self.tmpdirname) processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor) self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC) def test_save_load_pretrained_additional_features(self): processor = Wav2Vec2ProcessorWithLM( 
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname) processor = Wav2Vec2ProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) self.assertEqual(processor.language_model.alpha, 5.0) self.assertEqual(processor.language_model.beta, 3.0) self.assertEqual(processor.language_model.score_boundary, -7.0) self.assertEqual(processor.language_model.unk_score_offset, 3) def test_load_decoder_tokenizer_mismatch_content(self): tokenizer = self.get_tokenizer() tokenizer.add_tokens(["xx"]) with self.assertRaisesRegex(ValueError, "include"): Wav2Vec2ProcessorWithLM( tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def _get_dummy_logits(self, shape=(2, 10, 16), seed=77): np.random.seed(seed) return np.random.rand(*shape) def test_decoder(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits(shape=(10, 16), seed=13) decoded_processor = processor.decode(logits) decoded_decoder = decoder.decode_beams(logits)[0] self.assertEqual(decoded_decoder[0], decoded_processor.text) self.assertEqual("</s> <s> </s>", decoded_processor.text) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score) @parameterized.expand([[None], ["fork"], ["spawn"]]) def test_decoder_batch(self, pool_context): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() if pool_context is None: decoded_processor = processor.batch_decode(logits) else: with get_context(pool_context).Pool() as pool: decoded_processor = processor.batch_decode(logits, pool) logits_list = list(logits) with get_context("fork").Pool() as p: decoded_beams = decoder.decode_beams_batch(p, logits_list) texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0]) logit_scores_decoder.append(beams[0][-2]) lm_scores_decoder.append(beams[0][-1]) self.assertListEqual(texts_decoder, decoded_processor.text) self.assertListEqual(["<s> <s> 
</s>", "<s> <s> <s>"], decoded_processor.text) self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score) self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score) def test_decoder_with_params(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() beam_width = 15 beam_prune_logp = -20.0 token_min_logp = -4.0 decoded_processor_out = processor.batch_decode( logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] logit_scores = [d[0][2] for d in decoded_decoder_out] lm_scores = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor) self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score)) self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3)) self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score)) self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3)) def test_decoder_with_params_of_lm(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() alpha = 2.0 beta = 5.0 unk_score_offset = -20.0 lm_score_boundary = True decoded_processor_out = processor.batch_decode( logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor) lm_model = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0) self.assertEqual(lm_model.beta, 5.0) self.assertEqual(lm_model.unk_score_offset, -20.0) self.assertEqual(lm_model.score_boundary, True) def test_decoder_download_ignores_files(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() downloaded_decoder_files = os.listdir(path_to_cached_dir) expected_decoder_files = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() self.assertListEqual(downloaded_decoder_files, expected_decoder_files) def test_decoder_local_files(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") processor = 
Wav2Vec2ProcessorWithLM.from_pretrained(local_dir) language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() local_decoder_files = os.listdir(local_dir) expected_decoder_files = os.listdir(path_to_cached_dir) local_decoder_files.sort() expected_decoder_files.sort() self.assertListEqual(local_decoder_files, expected_decoder_files) def test_processor_from_auto_processor(self): processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm") raw_speech = floats_list((3, 1000)) input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np") input_auto = processor_auto(raw_speech, return_tensors="np") for key in input_wav2vec2.keys(): self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2) logits = self._get_dummy_logits() decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits) decoded_auto = processor_auto.batch_decode(logits) self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets_integration_fast(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits()[0] outputs = processor.decode(logits, output_word_offsets=True) self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5]) def test_offsets_integration_fast_batch(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits() outputs = processor.batch_decode(logits, output_word_offsets=True) self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertListEqual( [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5]) @slow @require_torch @require_torchaudio def test_word_time_stamp_integration(self): import torch ds = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", 
streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) ds_iter = iter(ds) sample = next(ds_iter) processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values).logits.cpu().numpy() output = processor.decode(logits[0], output_word_offsets=True) time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate word_time_stamps = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" EXPECTED_TEXT = "THE TRACK APPEARS ON THE COMPILATION ALBUM CRAFT FORKS" self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT) self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text) start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time")) end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time")) expected_start_tensor = torch.tensor([0.6800, 0.8800, 1.1800, 1.8600, 1.9600, 2.1000, 3.0000, 3.5600, 3.9800]) expected_end_tensor = torch.tensor([0.7800, 1.1000, 1.6600, 1.9200, 2.0400, 2.8000, 3.3000, 3.8800, 4.2800]) self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01)) self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
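# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): LM-boosted CTC
# decoding with word time stamps, mirroring `test_word_time_stamp_integration`
# above. The checkpoint is the one referenced in that test; the helper name
# and the 16 kHz mono waveform input are assumptions made for the example.
# ---------------------------------------------------------------------------
def _example_transcribe_with_lm(waveform, sampling_rate=16_000):
    import torch
    from transformers import AutoProcessor, Wav2Vec2ForCTC

    processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

    inputs = processor(waveform, sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs.input_values).logits.cpu().numpy()

    # `decode` runs pyctcdecode's beam search with the bundled n-gram LM.
    output = processor.decode(logits[0], output_word_offsets=True)

    # Frame offsets are converted to seconds the same way as in the test above.
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    word_time_stamps = [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in output["word_offsets"]
    ]
    return output.text, word_time_stamps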
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch WavLM model."""
import math import unittest import pytest from datasets import load_dataset from transformers import WavLMConfig, is_torch_available from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2FeatureExtractor, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, ) class WavLMModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, tdnn_dim=(32, 32), tdnn_kernel=(3, 3), tdnn_dilation=(1, 1), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return WavLMConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, 
hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = WavLMModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): model = WavLMModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = WavLMForCTC(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = WavLMForSequenceClassification(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = WavLMForCTC(config=config) model.to(torch_device) model.train() model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = 
model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = WavLMForSequenceClassification(config=config) model.to(torch_device) model.train() model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = WavLMForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class WavLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (WavLMForCTC, WavLMModel, WavLMForAudioFrameClassification, WavLMForSequenceClassification, WavLMForXVector) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": WavLMForSequenceClassification, "automatic-speech-recognition": WavLMForCTC, "feature-extraction": WavLMModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = WavLMModelTester(self) self.config_tester = ConfigTester(self, config_class=WavLMConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) def test_inputs_embeds(self): pass def test_forward_signature(self): pass def test_resize_tokens_embeddings(self): pass def 
test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "rel_attn_embed", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented for WavLM") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus") self.assertIsNotNone(model) @require_torch @require_torchaudio @slow class WavLMModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_base(self): model = 
WavLMModel.from_pretrained("microsoft/wavlm-base-plus").to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "microsoft/wavlm-base-plus", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs = feature_extractor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): hidden_states_slice = ( model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu() ) EXPECTED_HIDDEN_STATES_SLICE = torch.tensor( [[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]] ) self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2)) def test_inference_large(self): model = WavLMModel.from_pretrained("microsoft/wavlm-large").to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "microsoft/wavlm-large", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs = feature_extractor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): hidden_states_slice = ( model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu() ) EXPECTED_HIDDEN_STATES_SLICE = torch.tensor( [[[0.2122, 0.0500], [0.2118, 0.0563]], [[0.1353, 0.1818], [0.2453, 0.0595]]] ) self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2)) def test_inference_diarization(self): model = WavLMForAudioFrameClassification.from_pretrained("microsoft/wavlm-base-plus-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) labels = (outputs.logits > 0).long() expected_logits = torch.tensor( [ [[-5.9566, -8.6554], [-5.7137, -8.9386], [-5.7906, -7.0973], [-5.7829, -5.9999]], [[-5.2086, -7.7878], [-4.8890, -7.9312], [-4.2004, -3.9101], [-5.4480, -4.6932]], [[-4.6105, -6.7178], [-5.1930, -6.1635], [-2.6228, -4.1123], [-2.7646, -3.1576]], [[-4.4477, -7.9206], [-3.9339, -7.3707], [-4.9528, -4.8242], [-3.6921, -2.9687]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 258) self.assertEqual(labels[0, :, 1].sum(), 647) self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1) cosine_sim = torch.nn.CosineSimilarity(dim=-1) self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9787, 3) 
self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.5064, 3) self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.4780, 3) self.assertAlmostEqual(outputs.loss.item(), 18.4154, 2)
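# The speaker-verification integration test above condenses a typical WavLMForXVector flow. The
# sketch below restates it outside the test harness as a hedged illustration, not part of the
# suite: it assumes two 16 kHz mono waveforms `wav1` and `wav2` (NumPy arrays) are already loaded,
# and the helper name is illustrative; the checkpoint and cosine-similarity comparison mirror the
# test above.
import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, WavLMForXVector

def speaker_similarity(wav1: np.ndarray, wav2: np.ndarray) -> float:
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv")
    model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv").eval()
    inputs = feature_extractor([wav1, wav2], sampling_rate=16_000, padding=True, return_tensors="pt")
    with torch.no_grad():
        embeddings = model(**inputs).embeddings
    embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
    # Values close to 1.0 suggest the same speaker, mirroring the ~0.98 same-speaker pair above.
    return torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1).item()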
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Testing suite for the Whisper feature extractor. Notes recovered from its inline comments:
# floats_list creates a random float32 tensor; unbatched inputs increase in size; test_call checks
# that __call__ wraps encode_plus and batch_encode_plus with inputs of length 800, 1000 and 1200,
# covering feature size, non-batched input, batched input, batched 2-D numpy arrays and truncation;
# the integration test uses automatic decoding with LibriSpeech; the normalization test rescales
# audio to [0, 65535] to reproduce the reported issue.
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class WhisperFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize self.feature_size = feature_size self.chunk_length = chunk_length self.hop_length = hop_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None def setUp(self): self.feat_extract_tester = WhisperFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") 
feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] 
for x in speech_samples] def test_integration(self): EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 80, 3000)) self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4)) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) audio = self._load_datasamples(1)[0] audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0] self.assertTrue(np.all(np.mean(audio) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
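# The feature-extraction tests above pin down the default Whisper front end: 80 mel bins and a
# 30-second window padded or truncated to 3000 frames. The sketch below is a minimal, hedged usage
# example of that behaviour outside the test harness; the random waveform is a stand-in for real
# 16 kHz audio and the helper name is illustrative.
import numpy as np
from transformers import WhisperFeatureExtractor

def demo_whisper_log_mel() -> np.ndarray:
    # defaults: feature_size=80, hop_length=160, chunk_length=30, sampling_rate=16000
    feature_extractor = WhisperFeatureExtractor()
    waveform = np.random.randn(16_000 * 5).astype(np.float32)  # 5 seconds of fake 16 kHz audio
    features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
    assert features.shape == (1, 80, 3000)  # the same shape test_integration checks
    return features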
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Testing suite for the Flax Whisper models. Notes recovered from its inline comments:
# - Several common tests are overwritten because the model takes input_features rather than
#   input_ids; signature parameters form an OrderedDict, so arg_names order is deterministic.
# - The PT/Flax cross tests convert the Flax model to a PyTorch one (stripping the "Flax" prefix
#   from the class name), save the PT model, and check that all base-model weights load correctly;
#   the output-comparison tolerance is slightly higher because the test recently became flaky.
# - A helper computes the output length of the convolutional layers.
# - The integration tests use automatic decoding with LibriSpeech.
# - WhisperEncoder has no inputs_embeds (so get_input_embeddings is not implemented), cannot resize
#   token embeddings since it has no token embeddings, and does not have any base model, which is
#   why the corresponding save/load-to-base tests are skipped.
import functools import inspect import tempfile import unittest import transformers from transformers import WhisperConfig, is_flax_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_datasets_available(): import datasets from datasets import load_dataset if is_flax_available(): import jax import numpy as np from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import ( FLAX_MODEL_MAPPING, FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, WhisperFeatureExtractor, WhisperProcessor, ) from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.whisper.modeling_flax_whisper import sinusoidal_embedding_init @require_flax class FlaxWhisperModelTester: config_cls = WhisperConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=99, d_model=16, decoder_attention_heads=4, decoder_ffn_dim=16, decoder_layers=2, encoder_attention_heads=4, encoder_ffn_dim=16, encoder_layers=2, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=70, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = encoder_layers self.num_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_seq_length = seq_length // 2 self.decoder_seq_length = 1 self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs_for_common(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = np.array(self.batch_size * [[self.decoder_start_token_id]]) config = WhisperConfig( vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=True, activation_function=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, 
max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, pad_token_id=self.pad_token_id, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, tie_word_embeddings=True, d_model=self.d_model, decoder_attention_heads=self.decoder_attention_heads, decoder_ffn_dim=self.decoder_ffn_dim, decoder_layers=self.decoder_layers, encoder_attention_heads=self.encoder_attention_heads, encoder_ffn_dim=self.encoder_ffn_dim, encoder_layers=self.encoder_layers, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) inputs_dict = prepare_whisper_inputs_dict(config, input_features, decoder_input_ids) return config, inputs_dict def prepare_whisper_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8), ], axis=-1, ) return { "input_features": input_ids, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } def partialclass(cls, *args, **kwargs): class NewCls(cls): __init__ = functools.partialmethod(cls.__init__, *args, **kwargs) return NewCls def make_partial_class(full_class, *args, **kwargs): partial_class = partialclass(full_class, *args, **kwargs) partial_class.__name__ = full_class.__name__ partial_class.__module__ = full_class.__module__ return partial_class @require_flax class FlaxWhisperModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForConditionalGeneration, FlaxWhisperModel) if is_flax_available() else () all_generative_model_classes = (FlaxWhisperForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = FlaxWhisperModelTester(self) _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "decoder_input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, decoder_input_ids, **kwargs): return model(input_features=input_features, decoder_input_ids=decoder_input_ids, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, 
outputs): self.assertEqual(jitted_output.shape, output.shape) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) pt_model_class = getattr(transformers, base_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) pt_model_class = getattr(transformers, model_class.__name__[4:]) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in 
self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_encoder_sinusoidal_embed_positions(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.params if model.base_model_prefix in params: params = model.params[model.base_model_prefix] embeds = params["encoder"]["embed_positions"]["embedding"] sinusoids = sinusoidal_embedding_init(None, embeds.shape) self.assertTrue(jax.numpy.allclose(embeds, sinusoids)) @slow @require_flax class FlaxWhisperModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_tiny_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, ) EXPECTED_LOGITS = np.array( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_small_en_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-small.en", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([model.config.decoder_start_token_id]), output_hidden_states=False, output_attentions=False, return_dict=False, ) 
logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T EXPECTED_LOGITS = np.array( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_large_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="np" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, return_dict=False, ) logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T EXPECTED_LOGITS = np.array( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_tiny_en_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0], skip_special_tokens=True) EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation_multilingual(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np") model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate( input_features, do_sample=False, max_length=20, ).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="translate") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences EXPECTED_LOGITS = np.array( [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] ) self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. 
Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all,", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_en_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences EXPECTED_LOGITS = np.array( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="jax").input_features generate_fn = jax.jit(functools.partial(model.generate, max_length=448, return_timestamps=True)) generated_ids = generate_fn(input_features) EXPECTED_OUTPUT = np.array([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) self.assertTrue(np.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. 
He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." ), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) class FlaxWhisperEncoderModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, num_mel_bins=80, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, classifier_proj_size=4, num_labels=2, is_encoder_decoder=False, is_decoder=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens self.classifier_proj_size = classifier_proj_size self.num_labels = num_labels self.is_encoder_decoder = is_encoder_decoder self.is_decoder = is_decoder def get_config(self): return WhisperConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, classifier_proj_size=self.classifier_proj_size, num_labels=self.num_labels, is_encoder_decoder=self.is_encoder_decoder, is_decoder=self.is_decoder, ) def prepare_whisper_encoder_inputs_dict( self, input_features, ): return { "input_features": input_features, } def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length]) config = self.get_config() inputs_dict = self.prepare_whisper_encoder_inputs_dict( input_features=input_features, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = 
self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) @require_flax class WhisperEncoderModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForAudioClassification,) if is_flax_available() else () is_encoder_decoder = False fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = FlaxWhisperEncoderModelTester(self) _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, **kwargs): return model(input_features=input_features, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "attention_mask", "output_attentions"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_inputs_embeds(self): pass def test_model_common_attributes(self): pass def test_resize_tokens_embeddings(self): pass def test_save_load_to_base(self): pass def test_save_load_from_base(self): pass @is_pt_flax_cross_test def test_save_load_from_base_pt(self): pass @is_pt_flax_cross_test def test_save_load_to_base_pt(self): pass @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): pass
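
For readers who want to exercise the same Flax Whisper API outside the test harness, the following is a minimal sketch distilled from the slow integration tests above. It only reuses calls that appear verbatim in those tests (from_pretrained with from_pt=True, the processor's feature extractor with return_tensors="np", generate(...).sequences, batch_decode); the helper function name and the default checkpoint argument are illustrative assumptions, not part of the test suite.

from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor


def transcribe_sample(raw_speech, checkpoint="openai/whisper-tiny.en"):
    # Load the processor and the Flax model (converted from the PyTorch weights),
    # exactly as done in the integration tests above.
    processor = WhisperProcessor.from_pretrained(checkpoint)
    model = FlaxWhisperForConditionalGeneration.from_pretrained(checkpoint, from_pt=True)
    # Compute log-mel input features as NumPy arrays.
    input_features = processor.feature_extractor(
        raw_speech=raw_speech,
        sampling_rate=processor.feature_extractor.sampling_rate,
        return_tensors="np",
    ).input_features
    # Greedy decoding capped at 20 new tokens, mirroring the tests above.
    generated_ids = model.generate(input_features, max_length=20).sequences
    return processor.batch_decode(generated_ids, skip_special_tokens=True)
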
codingutf8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the tensorflow whisper model from future import annotations import inspect import tempfile import traceback import unittest import numpy as np from transformers import whisperconfig whisperfeatureextractor whisperprocessor from transformers testingutils import istfavailable requiretf requiretokenizers runtestinsubprocess slow from transformers utils import cachedproperty from transformers utils importutils import isdatasetsavailable from testconfigurationcommon import configtester from testmodelingtfcommon import tfmodeltestermixin floatstensor idstensor from testpipelinemixin import pipelinetestermixin if isdatasetsavailable import datasets from datasets import loaddataset if istfavailable import tensorflow as tf from transformers import tfwhisperforconditionalgeneration tfwhispermodel setseed from transformers models whisper modelingtfwhisper import tfwhisperdecoder tfwhisperencoder sinusoidalembeddinginit def preparewhisperinputsdict config inputfeatures decoderinputids attentionmasknone decoderattentionmasknone headmasknone decoderheadmasknone crossattnheadmasknone if decoderattentionmask is none decoderattentionmask tf wheredecoderinputids config padtokenid 1 0 if headmask is none headmask tf onesconfig encoderlayers config encoderattentionheads if decoderheadmask is none decoderheadmask tf onesconfig decoderlayers config decoderattentionheads if crossattnheadmask is none crossattnheadmask tf onesconfig decoderlayers config decoderattentionheads return inputfeatures inputfeatures decoderinputids decoderinputids decoderattentionmask decoderattentionmask headmask headmask decoderheadmask decoderheadmask crossattnheadmask crossattnheadmask requiretf class tfwhispermodeltester def init self parent batchsize13 seqlength60 istrainingtrue uselabelsfalse vocabsize200 hiddensize16 numhiddenlayers2 numattentionheads4 inputchannels1 hiddenactgelu hiddendropoutprob0 1 attentionprobsdropoutprob0 1 maxpositionembeddings20 maxsourcepositions30 maxtargetpositions60 bostokenid98 eostokenid98 padtokenid0 nummelbins80 decoderstarttokenid85 numconvlayers1 suppresstokensnone beginsuppresstokensnone self parent parent self batchsize batchsize self seqlength seqlength self istraining istraining self uselabels uselabels self vocabsize vocabsize self hiddensize hiddensize self numhiddenlayers numhiddenlayers self numattentionheads numattentionheads self inputchannels inputchannels self hiddenact hiddenact self hiddendropoutprob hiddendropoutprob self attentionprobsdropoutprob attentionprobsdropoutprob self nummelbins nummelbins self maxpositionembeddings maxpositionembeddings self maxsourcepositions maxsourcepositions self maxtargetpositions maxtargetpositions self eostokenid eostokenid self padtokenid padtokenid self bostokenid bostokenid self decoderstarttokenid decoderstarttokenid self numconvlayers numconvlayers self suppresstokens suppresstokens self beginsuppresstokens beginsuppresstokens def prepareconfigandinputsself inputfeatures floatstensorself 
batchsize self nummelbins self seqlength self vocabsize decoderinputids idstensorself batchsize self seqlength self vocabsize config self getconfig inputsdict preparewhisperinputsdict config attentionmasknone inputfeaturesinputfeatures decoderinputidsdecoderinputids return config inputsdict def getconfigself return whisperconfig vocabsizeself vocabsize dmodelself hiddensize encoderlayersself numhiddenlayers decoderlayersself numhiddenlayers encoderattentionheadsself numattentionheads decoderattentionheadsself numattentionheads inputchannelsself inputchannels dropoutself hiddendropoutprob attentiondropoutself attentionprobsdropoutprob maxpositionembeddingsself maxpositionembeddings maxsourcepositionsself maxsourcepositions maxtargetpositionsself maxtargetpositions eostokenidself eostokenid bostokenidself bostokenid padtokenidself padtokenid decoderffndimself hiddensize encoderffndimself hiddensize decoderstarttokenidself decoderstarttokenid suppresstokensself suppresstokens beginsuppresstokensself beginsuppresstokens def prepareconfigandinputsforcommonself config inputsdict self prepareconfigandinputs return config inputsdict def getsubsampledoutputlengthsself inputlengths for i in rangeself numconvlayers inputlengths inputlengths 1 2 1 return inputlengths def createandcheckmodelforwardself config inputsdict model tfwhispermodelconfigconfig inputfeatures inputsdictinputfeatures decoderinputids inputsdictdecoderinputids first forward pass lasthiddenstate modelinputfeatures decoderinputidsdecoderinputids lasthiddenstate self parent asserttruelasthiddenstate shape 13 7 16 def createandcheckdecodermodelpastlargeinputsself config inputsdict model tfwhispermodelconfigconfig getdecoder take a slice so we re shorter than the seqeuence length and can append later inputids inputsdictdecoderinputids 10 attentionmask inputsdictdecoderattentionmask 10 first forward pass outputs modelinputids attentionmaskattentionmask usecachetrue output pastkeyvalues outputs totuple create hypothetical multiple next token and extent to nextinputids nexttoken idstensorself batchsize 3 config vocabsize nexttokens tf wherenexttoken 2 2 nexttoken nextattnmask idstensorself batchsize 3 2 append to next inputids and nextinputids tf concatinputids nexttokens axis1 nextattentionmask tf concatattentionmask nextattnmask axis1 outputfromnopast modelnextinputids attentionmasknextattentionmasklasthiddenstate outputfrompast modelnexttokens attentionmasknextattentionmask pastkeyvaluespastkeyvalues lasthiddenstate select random slice randomsliceidx np random randint0 outputfrompast shape1 outputfromnopastslice outputfromnopast 3 randomsliceidx outputfrompastslice outputfrompast randomsliceidx self parent asserttrueoutputfrompastslice shape1 nexttokens shape1 test that outputs are equal for slice self parent asserttruenp allcloseoutputfrompastslice outputfromnopastslice atol1e2 def checkencoderdecodermodelstandaloneself config inputsdict model tfwhispermodelconfigconfig outputs modelinputsdict encoderlasthiddenstate outputs encoderlasthiddenstate lasthiddenstate outputs lasthiddenstate with tempfile temporarydirectory as tmpdirname encoder model getencoder encoder savepretrainedtmpdirname encoder tfwhisperencoder frompretrainedtmpdirname encoderlasthiddenstate2 encoderinputsdictinputfeatures0 self parent asserttrueencoderlasthiddenstate2 encoderlasthiddenstate abs max 1e3 with tempfile temporarydirectory as tmpdirname decoder model getdecoder decoder savepretrainedtmpdirname decoder tfwhisperdecoder frompretrainedtmpdirname 
lasthiddenstate2 decoder inputidsinputsdictdecoderinputids attentionmaskinputsdictdecoderattentionmask encoderhiddenstatesencoderlasthiddenstate 0 self parent asserttruelasthiddenstate2 lasthiddenstate abs max 1e3 requiretf class tfwhispermodeltesttfmodeltestermixin pipelinetestermixin unittest testcase allmodelclasses tfwhispermodel tfwhisperforconditionalgeneration if istfavailable else allgenerativemodelclasses tfwhisperforconditionalgeneration if istfavailable else pipelinemodelmapping featureextraction tfwhispermodel if istfavailable else isencoderdecoder true fxcompatible false testpruning false testmissingkeys false testonnx false inputname inputfeatures todo ydshieh undo skip once a fix is done on tf side unittest skipskip for now as tf 2 13 breaks it on gpu def testxlagenerateslowself super testxlagenerateslow def setupself self modeltester tfwhispermodeltesterself self configtester configtesterself configclasswhisperconfig self maxdiff 3000 def testconfigself self configtester runcommontests def testsaveloadstrictself config inputsdict self modeltester prepareconfigandinputs for modelclass in self allmodelclasses model modelclassconfig model build with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname savedmodelfalse model2 info modelclass frompretrainedtmpdirname outputloadinginfotrue self assertequalinfomissingkeys def testmodelforwardself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckmodelforwardconfigandinputs def testrequiresgradencoderembedpositionsself config self modeltester getconfig for modelclass in self allmodelclasses model modelclassconfig encoder model getencoder self assertfalseencoder embedpositions trainable def testencodersinusoidalembedpositionsself config self modeltester getconfig for modelclass in self allmodelclasses model modelclassconfig model build embeds model getencoder embedpositions getweights0 sinusoids sinusoidalembeddinginitembeds shape numpy self asserttruenp allcloseembeds sinusoids def testdecodermodelpastwithlargeinputsself configandinputs self modeltester prepareconfigandinputs self modeltester createandcheckdecodermodelpastlargeinputsconfigandinputs def getinputidsandconfigself config inputsdict self modeltester prepareconfigandinputsforcommon inputids inputsdictself inputname cut to half length take max batchsize 3 maxbatchsize 3 inputids inputids maxbatchsize generate max 3 tokens maxlength 4 if config eostokenid is not none and config padtokenid is none hack to allow generate for models such as gpt2 as is done in generate config padtokenid config eostokenid return config inputids none maxlength not implemented currently def testinputsembedsself pass unittest skiptraining is not yet supported def testtrainingself pass def testgeneratewithheadmaskingself pass unittest skipfp16 is not yet supported for tf models def testgeneratefp16self config inputdict self modeltester prepareconfigandinputs config maxtargetpositions 400 inputfeatures inputdictinputfeatures model tfwhisperforconditionalgenerationconfig model generateinputfeatures model generateinputfeatures numbeams4 dosampletrue earlystoppingfalse numreturnsequences3 def testforwardsignatureself config self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses model modelclassconfig signature inspect signaturemodel call signature parameters is an ordereddict so argnames order is deterministic argnames signature parameters keys expectedargnames inputfeatures decoderinputids decoderattentionmask 
expectedargnames extend decoderpositionids headmask decoderheadmask crossattnheadmask encoderoutputs if headmask and decoderheadmask and crossattnheadmask in argnames else encoderoutputs self assertlistequalargnames lenexpectedargnames expectedargnames def testhiddenstatesoutputself def checkhiddenstatesoutputinputsdict config modelclass model modelclassconfig outputs modelself prepareforclassinputsdict modelclass hiddenstates outputs encoderhiddenstates if config isencoderdecoder else outputs hiddenstates expectednumlayers getattr self modeltester expectednumhiddenlayers self modeltester numhiddenlayers 1 self assertequallenhiddenstates expectednumlayers if hasattrself modeltester encoderseqlength seqlength self modeltester encoderseqlength else seqlength self modeltester seqlength subsampledseqlength model getfeatextractoutputlengthsseqlength self assertlistequal listhiddenstates0 shape2 subsampledseqlength self modeltester hiddensize if config isencoderdecoder hiddenstates outputs decoderhiddenstates self assertisinstancehiddenstates list tuple self assertequallenhiddenstates expectednumlayers decoderseqlength getattrself modeltester decoderseqlength seqlength self assertlistequal listhiddenstates0 shape2 decoderseqlength self modeltester hiddensize config inputsdict self modeltester prepareconfigandinputsforcommon for modelclass in self allmodelclasses inputsdictoutputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass check that outputhiddenstates also work using config del inputsdictoutputhiddenstates config outputhiddenstates true checkhiddenstatesoutputinputsdict config modelclass def checkpttfoutputsself tfoutputs ptoutputs modelclass tol5e5 nameoutputs attributesnone we override with a slightly higher tol value as test recently became flaky super checkpttfoutputstfoutputs ptoutputs modelclass tol name attributes def testattentionoutputsself config inputsdict self modeltester prepareconfigandinputsforcommon config returndict true seqlen getattrself modeltester seqlength none decoderseqlength getattrself modeltester decoderseqlength seqlen encoderseqlength getattrself modeltester encoderseqlength seqlen encoderkeylength getattrself modeltester keylength encoderseqlength decoderkeylength getattrself modeltester decoderkeylength encoderkeylength for modelclass in self allmodelclasses inputsdictoutputattentions true inputsdictoutputhiddenstates false config returndict true model modelclassconfig subsampledencoderseqlength model getfeatextractoutputlengthsencoderseqlength subsampledencoderkeylength model getfeatextractoutputlengthsencoderkeylength outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers check that outputattentions also work using config del inputsdictoutputattentions config outputattentions true model modelclassconfig outputs modelself prepareforclassinputsdict modelclass attentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenattentions self modeltester numhiddenlayers self assertlistequal listattentions0 shape3 self modeltester numattentionheads subsampledencoderseqlength subsampledencoderkeylength outlen lenoutputs correctoutlen 5 loss is at first position if labels in inputsdict correctoutlen 1 loss is added to beginning if pastkeyvalues in outputs correctoutlen 1 pastkeyvalues have been returned self assertequaloutlen correctoutlen decoder attentions 
decoderattentions outputs decoderattentions self assertisinstancedecoderattentions list tuple self assertequallendecoderattentions self modeltester numhiddenlayers self assertlistequal listdecoderattentions0 shape3 self modeltester numattentionheads decoderseqlength decoderkeylength cross attentions crossattentions outputs crossattentions self assertisinstancecrossattentions list tuple self assertequallencrossattentions self modeltester numhiddenlayers self assertlistequal listcrossattentions0 shape3 self modeltester numattentionheads decoderseqlength subsampledencoderkeylength check attention is always last and order is fine inputsdictoutputattentions true inputsdictoutputhiddenstates true model modelclassconfig outputs modelself prepareforclassinputsdict modelclass addedhiddenstates 2 self assertequaloutlen addedhiddenstates lenoutputs selfattentions outputs encoderattentions if config isencoderdecoder else outputs attentions self assertequallenselfattentions self modeltester numhiddenlayers self assertlistequal listselfattentions0 shape3 self modeltester numattentionheads subsampledencoderseqlength subsampledencoderkeylength def testgeneratewithoutinputidsself pass staticmethod def getencoderoutputs model inputids attentionmask outputattentionsnone outputhiddenstatesnone numinterleave1 encoder model getencoder encoderoutputs encoder inputids outputattentionsoutputattentions outputhiddenstatesoutputhiddenstates encoderoutputslasthiddenstate encoderoutputs lasthiddenstate repeatinterleave numinterleave dim0 inputids inputids 0 inputids tf zeroslikeinputids 1 dtypetf int64 tf converttotensor model getdecoderstarttokenid attentionmask none return encoderoutputs inputids attentionmask def checkoutputsself output inputids config usecachefalse numreturnsequences1 batchsize mel seqlength inputids shape subsampledseqlength self modeltester getsubsampledoutputlengthsseqlength numsequencesinoutput batchsize numreturnsequences genlen output sequences shape1 1 if config isencoderdecoder else output sequences shape1 seqlength scores self checkscoresnumsequencesinoutput output scores lengthgenlen configconfig attentions encoder self checkencoderattentionforgenerate output encoderattentions batchsize config subsampledseqlength decoder self checkattentionsforgenerate numsequencesinoutput output decoderattentions minlength1 maxlengthoutput sequences shape1 configconfig usecacheusecache hidden states encoder self checkencoderhiddenstatesforgenerate output encoderhiddenstates batchsize config subsampledseqlength decoder self checkhiddenstatesforgenerate numsequencesinoutput output decoderhiddenstates minlength1 maxlengthoutput sequences shape1 configconfig usecacheusecache overwritten from parent due to the inability to work when nontext inputs are not passed and because the input is inputfeatures def testlmheadmodelrandomnobeamsearchgenerateself config inputsdict self modeltester prepareconfigandinputsforcommon inputfeatures inputsdict getinputfeatures none iterate over all generative models for modelclass in self allgenerativemodelclasses model modelclassconfig if config bostokenid is none if bos token id is not defined model needs inputfeatures with self assertraisesassertionerror model generatedosampletrue maxlength5 numreturnsequences 1 self checkgeneratedidsmodel generateinputfeatures dosampletrue with self assertraisesvalueerror generating multiple sequences when no beam search generation is not allowed as it would always generate the same sequences model generateinputfeatures dosamplefalse 
numreturnsequences2 numreturnsequences 1 sample self checkgeneratedidsmodel generateinputfeatures dosampletrue numreturnsequences2 check bad words tokens language generation create list of 1seq bad token and list of 2seq of bad tokens badwordsids self generaterandombadtokens1 model self generaterandombadtokens2 model outputtokens model generate inputfeatures dosampletrue badwordsidsbadwordsids numreturnsequences2 only count generated tokens generatedids outputtokens inputfeatures shape1 self assertfalseself checkmatchtokensgeneratedids numpy tolist badwordsids overwritten from parent due to the inability to work when nontext inputs are not passed and because the input is inputfeatures def testlmheadmodelrandombeamsearchgenerateself config inputsdict self modeltester prepareconfigandinputsforcommon inputfeatures inputsdict getinputfeatures none for modelclass in self allgenerativemodelclasses model modelclassconfig if config bostokenid is none if bos token id is not defined model needs inputids numreturnsequences 1 self checkgeneratedidsmodel generateinputfeatures dosampletrue numbeams2 with self assertraisesvalueerror generating more sequences than having beams leads is not possible model generateinputfeatures dosamplefalse numreturnsequences3 numbeams2 numreturnsequences 1 sample self checkgeneratedids model generate inputfeatures dosampletrue numbeams2 numreturnsequences2 numreturnsequences 1 greedy self checkgeneratedids model generateinputfeatures dosamplefalse numbeams2 numreturnsequences2 check bad words tokens language generation create list of 1seq bad token and list of 2seq of bad tokens badwordsids self generaterandombadtokens1 model self generaterandombadtokens2 model outputtokens model generate inputfeatures dosamplefalse badwordsidsbadwordsids numbeams2 numreturnsequences2 only count generated tokens generatedids outputtokens inputfeatures shape1 self assertfalseself checkmatchtokensgeneratedids numpy tolist badwordsids def testgeneratewithpromptidsandtaskandlanguageself config inputdict self modeltester prepareconfigandinputsforcommon model tfwhisperforconditionalgenerationconfig inputfeatures inputdictinputfeatures promptids np arange5 language de task translate langid 6 taskid 7 model generationconfig setattrlangtoid language langid model generationconfig setattrtasktoid task taskid output model generateinputfeatures maxnewtokens5 tasktask languagelanguage promptidspromptids expectedoutputstart promptids tolist model generationconfig decoderstarttokenid langid taskid for row in output numpy tolist self assertlistequalrow lenexpectedoutputstart expectedoutputstart def testgeneratewithpromptidsandforceddecoderidsself config inputdict self modeltester prepareconfigandinputsforcommon model tfwhisperforconditionalgenerationconfig inputfeatures inputdictinputfeatures promptids np asarrayrange5 forceddecoderids 1 6 2 7 3 8 output model generate inputfeatures maxnewtokens5 forceddecoderidsforceddecoderids promptidspromptids expectedoutputstart promptids tolist model generationconfig decoderstarttokenid token for rank token in forceddecoderids for row in output numpy tolist self assertlistequalrow lenexpectedoutputstart expectedoutputstart def loaddatasamplesnumsamples ds loaddatasethfinternaltestinglibrispeechasrdummy clean splitvalidation automatic decoding with librispeech speechsamples ds sortid selectrangenumsamples numsamplesaudio return xarray for x in speechsamples def testlargelogitslibrispeechinqueue outqueue timeout error none try inqueue gettimeouttimeout setseed0 model 
tfwhispermodel frompretrainedopenaiwhisperlarge inputspeech loaddatasamples1 processor whisperprocessor frompretrainedopenaiwhisperlarge processedinputs processor audioinputspeech textthis part of the speech addspecialtokensfalse returntensorstf inputfeatures processedinputs inputfeatures decoderinputids processedinputs labels logits model inputfeatures decoderinputidsdecoderinputids outputhiddenstatesfalse outputattentionsfalse usecachefalse logits logits lasthiddenstate tf transposemodel model decoder embedtokens weights0 fmt off expectedlogits tf converttotensor 2 1382 0 9381 4 4671 3 5589 2 4022 3 8576 0 6521 2 5472 1 8301 1 9957 2 3432 1 4678 0 5459 2 2597 1 5179 2 5357 1 1624 0 6194 1 0757 1 8259 2 4076 1 6601 2 3503 1 3376 1 9891 1 8635 3 8931 5 3699 4 4772 3 9184 fmt on unittest testcase asserttruenp allcloselogits0 0 30 expectedlogits atol1e4 except exception error ftraceback formatexc results error error outqueue putresults timeouttimeout outqueue join def testlargegenerationinqueue outqueue timeout error none try inqueue gettimeouttimeout setseed0 processor whisperprocessor frompretrainedopenaiwhisperlarge model tfwhisperforconditionalgeneration frompretrainedopenaiwhisperlarge inputspeech loaddatasamples1 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids model generate inputfeatures dosamplefalse maxlength20 languageen tasktranscribe transcript processor batchdecodegeneratedids skipspecialtokenstrue0 expectedtranscript mr quilter is the apostle of the middle classes and we are glad unittest testcase assertequaltranscript expectedtranscript except exception error ftraceback formatexc results error error outqueue putresults timeouttimeout outqueue join def testlargegenerationmultilingualinqueue outqueue timeout error none try inqueue gettimeouttimeout setseed0 processor whisperprocessor frompretrainedopenaiwhisperlarge model tfwhisperforconditionalgeneration frompretrainedopenaiwhisperlarge ds loaddatasetcommonvoice ja splittest streamingtrue ds ds castcolumnaudio datasets audiosamplingrate16000 inputspeech nextiterdsaudioarray inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids model generate inputfeatures dosamplefalse maxlength20 languageja tasktranscribe transcript processor batchdecodegeneratedids skipspecialtokenstrue0 expectedtranscript unittest testcase assertequaltranscript expectedtranscript generatedids model generate inputfeatures dosamplefalse maxlength20 languageen tasktranscribe transcript processor batchdecodegeneratedids skipspecialtokenstrue0 expectedtranscript kimurasan called me unittest testcase assertequaltranscript expectedtranscript generatedids model generate inputfeatures dosamplefalse maxlength20 languageja tasktranslate transcript processor batchdecodegeneratedids skipspecialtokenstrue0 expectedtranscript i borrowed a phone from kimura san unittest testcase assertequaltranscript expectedtranscript except exception error ftraceback formatexc results error error outqueue putresults timeouttimeout outqueue join def testlargebatchedgenerationinqueue outqueue timeout error none try inqueue gettimeouttimeout setseed0 processor whisperprocessor frompretrainedopenaiwhisperlarge model tfwhisperforconditionalgeneration frompretrainedopenaiwhisperlarge inputspeech loaddatasamples4 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids1 model generateinputfeatures0 2 maxlength20 generatedids2 model 
generateinputfeatures2 4 maxlength20 generatedids np concatenategeneratedids1 generatedids2 fmt off expectedids 50258 50358 50363 2221 13 2326 388 391 307 264 50244 295 264 2808 5359 293 321 366 5404 281 50258 50358 50363 6966 307 2221 13 2326 388 391 311 9060 1570 1880 813 702 1871 13 50257 50257 50258 50358 50363 634 5112 505 300 412 341 42729 3196 295 264 1064 11 365 5272 293 12904 9256 50258 50358 50363 634 575 12525 22618 1968 6144 35617 20084 1756 311 589 307 534 10281 934 439 11 fmt on unittest testcase assertequalgeneratedids tolist expectedids fmt off expectedtranscript mr quilter is the apostle of the middle classes and we are glad to nor is mr quilter s manner less interesting than his matter he tells us that at this festive season of the year with christmas and roast beef he has grave doubts whether sir frederick layton s work is really greek after all fmt on transcript processor batchdecodegeneratedids skipspecialtokenstrue unittest testcase assertlistequaltranscript expectedtranscript except exception error ftraceback formatexc results error error outqueue putresults timeouttimeout outqueue join requiretf requiretokenizers class tfwhispermodelintegrationtestsunittest testcase cachedproperty def defaultprocessorself return whisperprocessor frompretrainedopenaiwhisperbase def loaddatasamplesself numsamples return loaddatasamplesnumsamples slow def testtinylogitslibrispeechself setseed0 model tfwhispermodel frompretrainedopenaiwhispertiny inputspeech self loaddatasamples1 featureextractor whisperfeatureextractor inputfeatures featureextractorinputspeech returntensorstf inputfeatures logits model inputfeatures decoderinputidstf converttotensor50258 50259 50359 outputhiddenstatesfalse outputattentionsfalse returndictfalse usecachefalse fmt off expectedlogits tf converttotensor 2 9892 6 7607 5 7348 3 6096 0 2152 5 7321 4 8855 1 6407 0 2823 1 5718 10 4269 3 4427 0 0219 8 0612 3 4784 8 4246 4 0575 2 2864 11 1084 0 9963 0 9884 8 5154 3 5469 9 3713 0 9786 3 5435 7 4850 5 2579 1 4366 10 4841 fmt on self asserttruenp allcloselogits00 0 30 expectedlogits atol1e4 fmt off expectedgeneration tf converttotensor 1 4651 2 6944 2 7821 2 3793 4 0738 0 0188 3 3203 1 9836 0 0520 0 7095 1 1063 0 2952 3 6786 0 5249 0 3105 4 7691 1 1562 1 3046 0 5810 0 3624 1 7006 1 3424 0 9817 2 1958 1 8775 5 7046 0 7679 4 0113 2 6848 2 8609 fmt on headlogits logits0 tf transposemodel model decoder embedtokens weights0 self asserttruenp allcloseheadlogits0 0 30 expectedgeneration atol1e4 slow def testsmallenlogitslibrispeechself setseed0 model tfwhispermodel frompretrainedopenaiwhispersmall en inputspeech self loaddatasamples1 feaureextractor whisperfeatureextractor inputfeatures feaureextractorinputspeech returntensorstf inputfeatures logits model inputfeatures decoderinputidstf converttotensormodel config decoderstarttokenid outputhiddenstatesfalse outputattentionsfalse usecachefalse logits logits lasthiddenstate tf transposemodel model decoder embedtokens weights0 fmt off expectedlogits tf converttotensor 3 6784 7 7211 9 5070 11 9286 7 6489 9 7026 5 6188 8 0104 4 6238 5 1833 9 0485 3 4079 5 4874 2 6935 6 3479 7 3398 6 9558 7 6867 7 4748 8 3463 9 9781 10 8389 10 3105 11 7201 9 7261 7 1590 5 9272 12 4509 11 1146 8 1918 fmt on self asserttruenp allcloselogits0 0 30 expectedlogits atol1e4 slow def testlargelogitslibrispeechself runtestinsubprocesstestcaseself targetfunctestlargelogitslibrispeech inputsnone slow def testtinyengenerationself setseed0 processor whisperprocessor frompretrainedopenaiwhispertiny en model 
tfwhisperforconditionalgeneration frompretrainedopenaiwhispertiny en model config decoderstarttokenid 50257 inputspeech self loaddatasamples1 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids model generateinputfeatures numbeams5 maxlength20 transcript processor tokenizer batchdecodegeneratedids0 expectedtranscript startoftranscriptnotimestamps mr quilter is the apostle of the middle classes and we are glad to self assertequaltranscript expectedtranscript slow def testtinygenerationself setseed0 processor whisperprocessor frompretrainedopenaiwhispertiny model tfwhisperforconditionalgeneration frompretrainedopenaiwhispertiny inputspeech self loaddatasamples1 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids model generateinputfeatures numbeams5 maxlength20 transcript processor tokenizer decodegeneratedids0 expectedtranscript startoftranscriptentranscribenotimestamps mr quilter is the apostle of the middle classes and we are glad self assertequaltranscript expectedtranscript slow def testtinyxlagenerationself setseed0 processor whisperprocessor frompretrainedopenaiwhispertiny model tfwhisperforconditionalgeneration frompretrainedopenaiwhispertiny inputspeech self loaddatasamples1 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures xlagenerate tf functionmodel generate jitcompiletrue generatedids model generateinputfeatures numbeams5 maxlength20 generatedidsxla xlagenerateinputfeatures numbeams5 maxlength20 transcript processor tokenizer decodegeneratedids0 transcriptxla processor tokenizer decodegeneratedidsxla0 expectedtranscript startoftranscriptentranscribenotimestamps mr quilter is the apostle of the middle classes and we are glad self assertequaltranscript expectedtranscript self assertequaltranscriptxla expectedtranscript slow def testlargegenerationself runtestinsubprocesstestcaseself targetfunctestlargegeneration inputsnone slow def testlargegenerationmultilingualself runtestinsubprocesstestcaseself targetfunctestlargegenerationmultilingual inputsnone slow def testlargebatchedgenerationself runtestinsubprocesstestcaseself targetfunctestlargebatchedgeneration inputsnone slow def testtinyenbatchedgenerationself setseed0 processor whisperprocessor frompretrainedopenaiwhispertiny en model tfwhisperforconditionalgeneration frompretrainedopenaiwhispertiny en inputspeech self loaddatasamples4 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures generatedids model generateinputfeatures maxlength20 fmt off expectedlogits tf converttotensor 50257 50362 1770 13 2264 346 353 318 262 46329 286 262 3504 6097 11 290 356 389 9675 284 50257 50362 5414 318 1770 13 2264 346 353 338 5642 1342 3499 621 465 2300 13 50256 50256 50256 50257 50362 679 4952 514 326 379 428 43856 1622 286 262 614 11 351 6786 290 32595 12023 28236 50257 50362 679 468 12296 17188 1771 7361 26113 18881 1122 338 670 318 1107 8312 706 477 290 460 fmt on self asserttruenp allclosegeneratedids expectedlogits fmt off expectedtranscript mr quilter is the apostle of the middle classes and we are glad to nor is mr quilter s manner less interesting than his matter he tells us that at this festive season of the year with christmas and roast beef looming he has grave doubts whether sir frederick layton s work is really greek after all and can fmt on transcript processor batchdecodegeneratedids skipspecialtokenstrue self assertlistequaltranscript 
expectedtranscript slow def testtinyenbatchedxlagenerationself setseed0 processor whisperprocessor frompretrainedopenaiwhispertiny en model tfwhisperforconditionalgeneration frompretrainedopenaiwhispertiny en inputspeech self loaddatasamples4 inputfeatures processor featureextractorrawspeechinputspeech returntensorstf inputfeatures xlagenerate tf functionmodel generate jitcompiletrue generatedids model generateinputfeatures maxlength20 generatedidsxla xlagenerateinputfeatures maxlength20 fmt off expectedlogits tf converttotensor 50257 50362 1770 13 2264 346 353 318 262 46329 286 262 3504 6097 11 290 356 389 9675 284 50257 50362 5414 318 1770 13 2264 346 353 338 5642 1342 3499 621 465 2300 13 50256 50256 50256 50257 50362 679 4952 514 326 379 428 43856 1622 286 262 614 11 351 6786 290 32595 12023 28236 50257 50362 679 468 12296 17188 1771 7361 26113 18881 1122 338 670 318 1107 8312 706 477 290 460 fmt on self asserttruenp allclosegeneratedids expectedlogits self asserttruenp allclosegeneratedidsxla expectedlogits fmt off expectedtranscript mr quilter is the apostle of the middle classes and we are glad to nor is mr quilter s manner less interesting than his matter he tells us that at this festive season of the year with christmas and roast beef looming he has grave doubts whether sir frederick layton s work is really greek after all and can fmt on transcript processor batchdecodegeneratedids skipspecialtokenstrue transcriptxla processor batchdecodegeneratedidsxla skipspecialtokenstrue self assertlistequaltranscript expectedtranscript self assertlistequaltranscriptxla expectedtranscript coding utf 8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the tensorflow whisper model computes the output length of the convolutional layers first forward pass take a slice so we re shorter than the seqeuence length and can append later first forward pass create hypothetical multiple next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice todo ydshieh undo skip once a fix is done on tf side cut to half length take max batch_size 3 generate max 3 tokens hack to allow generate for models such as gpt2 as is done in generate not implemented currently signature parameters is an ordereddict so arg_names order is deterministic check that output_hidden_states also work using config we override with a slightly higher tol value as test recently became flaky check that output_attentions also work using config loss is at first position loss is added to beginning past_key_values have been returned decoder attentions cross attentions check attention is always last and order is fine scores attentions encoder decoder hidden states encoder decoder overwritten from parent due to the inability to work when non text inputs are not passed and because the input is input_features iterate over all generative models if bos token id is not defined model needs input_features num_return_sequences 1 generating multiple sequences when no beam search generation is not 
allowed as it would always generate the same sequences num_return_sequences 1 sample check bad words tokens language generation create list of 1 seq bad token and list of 2 seq of bad tokens only count generated tokens overwritten from parent due to the inability to work when non text inputs are not passed and because the input is input_features if bos token id is not defined model needs input_ids num_return_sequences 1 generating more sequences than having beams leads is not possible num_return_sequences 1 sample num_return_sequences 1 greedy check bad words tokens language generation create list of 1 seq bad token and list of 2 seq of bad tokens only count generated tokens automatic decoding with librispeech fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on
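
Before the full TensorFlow Whisper test module that follows, here is a minimal sketch of the transcription flow those tests exercise. It is restricted to calls that appear verbatim in the suite below (feature extraction with return_tensors="tf", generate with max_length, batch_decode with skip_special_tokens); the function name and default checkpoint argument are illustrative assumptions only.

from transformers import TFWhisperForConditionalGeneration, WhisperProcessor


def tf_transcribe_sample(raw_speech, checkpoint="openai/whisper-tiny"):
    # Load the processor and the TensorFlow model, as in the slow generation tests below.
    processor = WhisperProcessor.from_pretrained(checkpoint)
    model = TFWhisperForConditionalGeneration.from_pretrained(checkpoint)
    # Log-mel input features as TF tensors.
    input_features = processor.feature_extractor(raw_speech=raw_speech, return_tensors="tf").input_features
    # Greedy decoding capped at 20 tokens; returns a tensor of token ids.
    generated_ids = model.generate(input_features, max_length=20)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)
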
from __future__ import annotations import inspect import tempfile import traceback import unittest import numpy as np from transformers import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor from transformers.testing_utils import is_tf_available, require_tf, require_tokenizers, run_test_in_subprocess, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_tf_available(): import tensorflow as tf from transformers import TFWhisperForConditionalGeneration, TFWhisperModel, set_seed from transformers.models.whisper.modeling_tf_whisper import ( TFWhisperDecoder, TFWhisperEncoder, sinusoidal_embedding_init, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = tf.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFWhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=60, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], 
self.vocab_size) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = TFWhisperModel(config=config) input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFWhisperModel(config=config).get_decoder() input_ids = inputs_dict["decoder_input_ids"][:, :-10] attention_mask = inputs_dict["decoder_attention_mask"][:, :-10] outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_token = ids_tensor((self.batch_size, 3), config.vocab_size) next_tokens = tf.where(next_token <= 2, 2, next_token) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = np.random.randint(0, output_from_past.shape[-1]) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(np.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TFWhisperModel(config=config) outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TFWhisperEncoder.from_pretrained(tmpdirname) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] 
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TFWhisperDecoder.from_pretrained(tmpdirname) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max() < 1e-3) @require_tf class TFWhisperModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFWhisperModel, TFWhisperForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFWhisperForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFWhisperModel} if is_tf_available() else {} is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_features" @unittest.skip("Skip for now as TF 2.13 breaks it on GPU") def test_xla_generate_slow(self): super().test_xla_generate_slow() def setUp(self): self.model_tester = TFWhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) model.build() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_requires_grad_encoder_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) encoder = model.get_encoder() self.assertFalse(encoder.embed_positions.trainable) def test_encoder_sinusoidal_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.build() embeds = model.get_encoder().embed_positions.get_weights()[0] sinusoids = sinusoidal_embedding_init(embeds.shape).numpy() self.assertTrue(np.allclose(embeds, sinusoids)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length def test_inputs_embeds(self): pass @unittest.skip("Training is not yet supported") def test_training(self): pass def test_generate_with_head_masking(self): pass @unittest.skip("fp16 is not yet supported for TF models") def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = 
input_dict["input_features"] model = TFWhisperForConditionalGeneration(config) model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["decoder_position_ids", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", encoder_key_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) + tf.convert_to_tensor( [model._get_decoder_start_token_id()] ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, 
min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): model.generate(input_features, do_sample=False, num_return_sequences=2) self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with self.assertRaises(ValueError): model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_generate_with_prompt_ids_and_task_and_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.arange(5) language = "<|de|>" task = "translate" lang_id = 6 task_id = 7 model.generation_config.__setattr__("lang_to_id", {language: lang_id}) model.generation_config.__setattr__("task_to_id", {task: task_id}) output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, lang_id, task_id, ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def test_generate_with_prompt_ids_and_forced_decoder_ids(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = 
TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.asarray(range(5)) forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] output = model.generate( input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids ) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, *[token for _rank, token in forced_decoder_ids], ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def _load_datasamples(num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _test_large_logits_librispeech(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="tf" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) unittest.TestCase().assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. 
Quilter is the apostle of the middle classes and we are glad" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation_multilingual(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_batched_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids_1 = model.generate(input_features[0:2], max_length=20) generated_ids_2 = model.generate(input_features[2:4], max_length=20) generated_ids = np.concatenate([generated_ids_1, generated_ids_2]) EXPECTED_IDS = [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] unittest.TestCase().assertEqual(generated_ids.tolist(), EXPECTED_IDS) EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) unittest.TestCase().assertListEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() @require_tf @require_tokenizers class TFWhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): return _load_datasamples(num_samples) @slow def test_tiny_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) EXPECTED_GENERATION = tf.convert_to_tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) head_logits = logits[0] @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) self.assertTrue(np.allclose(head_logits[0, 0, :30], EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-small.en") input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) EXPECTED_LOGITS = tf.convert_to_tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): run_test_in_subprocess(test_case=self, target_func=_test_large_logits_librispeech, inputs=None) @slow def test_tiny_en_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = 
processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, num_beams=5, max_length=20) generated_ids_xla = xla_generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) transcript_xla = processor.tokenizer.decode(generated_ids_xla[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) self.assertEqual(transcript_xla, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation, inputs=None) @slow def test_large_generation_multilingual(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation_multilingual, inputs=None) @slow def test_large_batched_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_batched_generation, inputs=None) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, max_length=20) EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. 
Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, max_length=20) generated_ids_xla = xla_generate(input_features, max_length=20) EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) self.assertTrue(np.allclose(generated_ids_xla, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) transcript_xla = processor.batch_decode(generated_ids_xla, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) self.assertListEqual(transcript_xla, EXPECTED_TRANSCRIPT)
coding utf 8 2022 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license testing suite for the pytorch whisper model this processor fakes the correct timestamps tokens pattern tok_1 tok_2 tok_n time_stamp_tok_1 time_stamp_tok_2 tok_n 1 we don t want to randomely sample timestamp tokens make sure to use correct index if a batch was removed produce timestamp with 30 force a timestamp force the same as before input_ids input_features computes the output length of the convolutional layers first forward pass first forward pass create hypothetical multiple next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice needs higher percentages after model tester s vocab_size is changed to 200 pr 21222 0 5 is for test_disk_offload which also works for test_model_parallelism todo fix the failed tests runtimeerror the size of tensor a 1500 must match the size of tensor b 30 at non singleton dimension 1 cut to half length take max batch_size batch_size generate max 3 tokens hack to allow generate for models such as gpt2 as is done in generate training is not supported yet hack to keep the test fast and not require downloading a model with a generation_config test language code test tokenizer code test language name signature parameters is an ordereddict so arg_names order is deterministic check that output_hidden_states also work using config check that output_attentions also work using config loss is at first position loss is
added to beginning past_key_values have been returned decoder attentions cross attentions check attention is always last and order is fine retrieve the embeddings and clone theme check that resizing the token embeddings with a larger vocab size increases the model s vocab size check that it actually resizes the embeddings matrix check that the model can still do a forward pass successfully every parameter should be resized check that resizing the token embeddings with a smaller vocab size decreases the model s vocab size check that it actually resizes the embeddings matrix make sure that decoder_input_ids are resized check that adding and removing tokens has not modified the first part of the embedding matrix if model cannot untied embeddings leave test if no output embeddings leave test check that resizing the token embeddings with a larger vocab size increases the model s vocab size check bias if present check that the model can still do a forward pass successfully every parameter should be resized check that resizing the token embeddings with a smaller vocab size decreases the model s vocab size check that it actually resizes the embeddings matrix check bias if present check that the model can still do a forward pass successfully every parameter should be resized check that the model can still do a forward pass successfully every parameter should be resized scores attentions encoder decoder hidden states encoder decoder whisper fa2 needs very high tolerance check with inference dropout whisper fa2 needs very high tolerance whisper fa2 needs very high tolerance to be sure we have no nan fstm still requires this hack fstm should probably be refactored similar to bart afterward prepare attention_mask with shape batch_size sequence_length we override with a slightly higher tol value as test recently became flaky we override with a slightly higher tol value as test recently became flaky no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the use_cache option and cache is not returned as a default so we disable use_cache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax send pytorch model to the correct device no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the use_cache option and cache is not returned as a default so we disable use_cache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax make sure weights are tied in pytorch send pytorch model to the correct device send pytorch model to the correct device forward pass forward pass len 250 with num_input_frames 60 force bsz 1 each chunk should not be longer than 10 if input features are long can t set return_timestamps to false if input features are long need to set generation config make sure that we only have the same begin token len 250 with num_input_frames 60 force bsz 1 make sure that we only have the same begin token automatic decoding with librispeech fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt off fmt on fmt skip fmt off fmt on apply specaugment set model to training mode to 
enable specaugment fmt off fmt on warm up assisted decoding warm up non assisted decoding assisted decoding non assisted decoding warm up assisted decoding warm up non assisted decoding assisted decoding non assisted decoding fmt off fmt on fmt off fmt on make sure single batch is exactly the same exact match fmt off fmt on computes the output length of the convolutional layers first forward pass signature parameters is an ordereddict so arg_names order is deterministic input embeds is meaningless for an encoder only acoustic model the equivalent test is passing the encoder outputs directly to the model needs to override as the encoder input embedding is a conv1d whisperencoder cannot resize token embeddings since it has no tokens embeddings no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the use_cache option and cache is not returned as a default so we disable use_cache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax send pytorch model to the correct device no flax model exists for this class output all for aggressive testing load pytorch class flax models don t use the use_cache option and cache is not returned as a default so we disable use_cache here for pytorch model load flax class make sure only flax inputs are forward that actually exist in function args prepare inputs remove function args that don t exist in flax send pytorch inputs to the correct device convert inputs to flax make sure weights are tied in pytorch send pytorch model to the correct device send pytorch model to the correct device first forward pass create hypothetical next token and extent to next_input_ids append to next input_ids and select random slice test that outputs are equal for slice create attention mask first forward pass create hypothetical next token and extent to next_input_ids change a random masked slice from input_ids append to next input_ids and attn_mask get two different outputs select random slice test that outputs are equal for slice generate only works with input ids for whisper decoder cannot keep gradients and it s not used enough to be worth fixing
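# --- Hedged illustration (not part of the original file) ---
# Several comments above refer to "the output length of the convolutional
# layers": Whisper's encoder front-end downsamples the mel-spectrogram, so the
# tests compare attention and hidden-state shapes against a subsampled sequence
# length rather than the raw one. The helper below mirrors the per-layer
# formula used by WhisperModelTester.get_subsampled_output_lengths further down
# (stride-2 conv: (L - 1) // 2 + 1), assuming the tester's default of a single
# conv layer; it is a sketch for clarity, not code from the test suite.
def subsampled_output_length(input_length: int, num_conv_layers: int = 1) -> int:
    """Length of the encoder output for a mel input of `input_length` frames."""
    for _ in range(num_conv_layers):
        input_length = (input_length - 1) // 2 + 1
    return input_length

# e.g. the tester's default seq_length of 60 frames maps to 30 encoder positions.
assert subsampled_output_length(60) == 30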
import copy import inspect import os import random import tempfile import time import unittest import numpy as np import pytest import transformers from transformers import WhisperConfig from transformers.testing_utils import ( is_pt_flax_cross_test, require_flash_attn, require_torch, require_torch_fp16, require_torch_gpu, require_torchaudio, slow, torch_device, ) from transformers.utils import cached_property, is_flax_available, is_torch_available from transformers.utils.import_utils import is_datasets_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import Audio, load_dataset if is_torch_available(): import torch from transformers import ( WhisperFeatureExtractor, WhisperForAudioClassification, WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperProcessor, set_seed, ) from transformers.generation.logits_process import LogitsProcessor from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder, sinusoids class DummyTimestampLogitProcessor(LogitsProcessor): def __init__( self, timestamp_begin, vocab_size, batch_size, max_length, min_space=3, seed=0, is_length_ascending=True ): self.timestamp_begin = timestamp_begin self.vocab_size = vocab_size self.min_space_between_timestamps = min_space self.timestamp_tokens = torch.arange(self.timestamp_begin, self.vocab_size) self.timestamp_tokens.to(torch_device) self.is_length_ascending = is_length_ascending self.no_time_stamp_counter = batch_size * [0] self.prev_highest_timestamp = batch_size * [0] self.batch_size = batch_size self.max_length = max_length self.count = 0 self.let_pass = [[] for _ in range(batch_size)] for k in range(batch_size): random.seed(seed + k) for _ in range(10000): self.let_pass[k].append(random.randint(1, 10) <= 3) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if input_ids.shape[-1] > 1: scores[:, self.timestamp_begin :] = -float("inf") self.no_time_stamp_counter = [x + 1 for x in self.no_time_stamp_counter] for k in range(input_ids.shape[0]): if self.is_length_ascending and input_ids.shape[0] < self.batch_size: prev_k = k + self.batch_size - input_ids.shape[0] else: prev_k = k if input_ids[k, -1] == self.timestamp_begin: self.no_time_stamp_counter[prev_k] = 0 can_produce = self.no_time_stamp_counter[prev_k] > self.min_space_between_timestamps must_produce = ( input_ids[k][2:].le(self.timestamp_begin).all() and input_ids.shape[-1] == self.max_length - 1 ) if (can_produce and self.let_pass[prev_k][self.count]) or must_produce: self.no_time_stamp_counter[prev_k] = 0 self.prev_highest_timestamp[prev_k] = max(input_ids[k].max() + 1, self.timestamp_tokens[0].item()) scores[k, :] = -float("inf") scores[k, self.prev_highest_timestamp[prev_k]] = 10.0 if ( input_ids.shape[-1] > 3 and input_ids[k, -1].item() in self.timestamp_tokens and input_ids[k, -2].item() not in self.timestamp_tokens ): scores[k, :] = -float("inf") scores[k, input_ids[k, -1].item()] = 10.0 self.count += 1 if torch.isinf(scores).all(): raise ValueError("Dummy logit processor is incorrectly set up. 
Scores should not be all inf.") return scores if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class WhisperModelTester: def __init__( self, parent, batch_size=2, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperModel(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = WhisperModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = WhisperModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class 
WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (WhisperModel, WhisperForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (WhisperForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": WhisperForAudioClassification, "automatic-speech-recognition": WhisperForConditionalGeneration, "feature-extraction": WhisperModel, "text-generation": WhisperForCausalLM, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False model_split_percents = [0.5, 0.8, 0.9] input_name = "input_features" def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name in [ "AutomaticSpeechRecognitionPipelineTests", "AudioClassificationPipelineTests", ]: return True return False def setUp(self): self.model_tester = WhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_model_forward_with_frozen_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs, freeze_encoder=True) def test_requires_grad_with_frozen_encoder(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.freeze_encoder() try: encoder_grads = [param.requires_grad for param in model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.decoder.parameters()] except AttributeError: encoder_grads = [param.requires_grad for param in model.model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.model.decoder.parameters()] self.assertFalse(all(encoder_grads)) self.assertTrue(all(decoder_grads)) def test_requires_grad_encoder_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) encoder = model.get_encoder() self.assertFalse(encoder.embed_positions.weight.requires_grad) def test_encoder_sinusoidal_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) embeds = model.get_encoder().embed_positions.weight self.assertTrue(torch.allclose(embeds, sinusoids(*embeds.shape))) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def _get_input_ids_and_config(self, 
batch_size=3): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] input_ids = input_ids[:batch_size, :, :] max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) decoder_input_ids = inputs.pop("decoder_input_ids", None) inputs.pop("decoder_attention_mask", None) wte = model.get_input_embeddings() inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_generate_with_head_masking(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).eval().to(torch_device) input_features = input_features.half() model.half() model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_generate_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).to(torch_device) model.generation_config.__setattr__("lang_to_id", {"<|en|>": 1}) model.generation_config.__setattr__("task_to_id", {"transcribe": 2}) model.generate(input_features, language="en") model.generate(input_features, language="<|en|>") model.generate(input_features, language="English") def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) 
self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 if "labels" in inputs_dict: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( 
list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) if model.get_output_embeddings() is None: continue model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) model(**self._prepare_for_class(inputs_dict, model_class)) model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) def 
test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + torch.tensor( [model._get_decoder_start_token_id()], device=input_ids.device ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference(self): import torch for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, use_flash_attention_2=False ) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) logits = outputs.decoder_hidden_states[-1] logits_fa = outputs_fa.decoder_hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-1) model.train() _ = model_fa(dummy_input, decoder_input_ids=decoder_input_ids) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): import torch for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = 
model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] dummy_input = dummy_input.to(torch.float16) decoder_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=dummy_input.device, dtype=torch.long) decoder_attention_mask = torch.tensor( [[0, 0, 0, 1, 1, 1]], device=dummy_input.device, dtype=torch.long ) outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) logits = outputs.decoder_hidden_states[-1] logits_fa = outputs_fa.decoder_hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-1) other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "output_hidden_states": True, } outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = outputs.decoder_hidden_states[-1] logits_fa = outputs_fa.decoder_hidden_states[-1] assert torch.allclose(logits_fa[:, -2:], logits[:, -2:], atol=4e-1) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False input_features = inputs["input_features"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] attention_mask = torch.ones( input_features.shape[0], input_features.shape[-1], device=input_features.device, dtype=input_features.dtype, ) traced_model = torch.jit.trace( model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def check_pt_tf_outputs(self, tf_outputs, 
pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, 
fx_model.params)
                pt_model.tie_weights()
                pt_model.to(torch_device)

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs)
                fx_outputs = fx_model(**fx_inputs)

                fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
                pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])

                # verify that PyTorch and Flax produce the same set of outputs with matching values
                self.assertEqual(fx_keys, pt_keys)
                self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True)

                pt_model_loaded.to(torch_device)
                pt_model_loaded.eval()

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs)

                fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
                pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None])

                self.assertEqual(fx_keys, pt_keys)
                self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class)

    def test_mask_feature_prob(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.mask_feature_prob = 0.2
        config.mask_feature_length = 2

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.train()
            encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state
            # use assertEqual so the expected shape is actually checked
            # (assertTrue would treat the second argument as a failure message)
            self.assertEqual(encoder_last_hidden_state.shape, (13, 30, 16))

    def test_mask_time_prob(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.mask_time_prob = 0.2
        config.mask_time_length = 2

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.train()
            encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state
            # use assertEqual so the expected shape is actually checked
            self.assertEqual(encoder_last_hidden_state.shape, (13, 30, 16))

    def test_generate_with_prompt_ids_and_task_and_language(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = WhisperForConditionalGeneration(config).eval().to(torch_device)
        input_features = input_dict["input_features"]
        prompt_ids = np.arange(5)
        language = "<|de|>"
        task = "translate"
        lang_id = 6
        task_id = 7
        model.generation_config.__setattr__("lang_to_id", {language: lang_id})
        model.generation_config.__setattr__("task_to_id", {task: task_id})

        output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids)

        expected_output_start = [
            *prompt_ids.tolist(),
            model.generation_config.decoder_start_token_id,
            lang_id,
            task_id,
        ]
        for row in output.tolist():
            self.assertListEqual(row[: len(expected_output_start)], expected_output_start)

    def test_generate_with_prompt_ids_and_forced_decoder_ids(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = WhisperForConditionalGeneration(config).eval().to(torch_device)
        input_features = input_dict["input_features"]
        prompt_ids = np.asarray(range(5))
        forced_decoder_ids = [(1, 6), (2, 7), (3, 8)]

        output = model.generate(
            input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids
        )

        expected_output_start = [
            *prompt_ids.tolist(),
            model.generation_config.decoder_start_token_id,
            *[token for _rank, token in forced_decoder_ids],
        ]
        for row in output.tolist():
            self.assertListEqual(row[: len(expected_output_start)], expected_output_start)

    def test_generate_with_prompt_ids_max_length(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.max_target_positions = 5
        model = WhisperForConditionalGeneration(config).eval().to(torch_device)
        input_features =
input_dict["input_features"] prompt_ids = np.asarray(range(4)) sliced_prompt_ids = prompt_ids[1:] sliced_prompt_ids = sliced_prompt_ids[-config.max_target_positions // 2 - 1 :] max_new_tokens = 5 with self.assertRaisesRegex( ValueError, f"The length of the sliced `prompt_ids` is {len(sliced_prompt_ids)}, and the `max_new_tokens` " f"{max_new_tokens}. Thus, the combined length of the sliced `prompt_ids` and `max_new_tokens` is: " f"{len(sliced_prompt_ids) + max_new_tokens}. This exceeds the `max_target_positions` of the Whisper model: " f"{config.max_target_positions}. You should either reduce the length of your prompt, or reduce the " f"value of `max_new_tokens`, so that their combined length is less that {config.max_target_positions}.", ): model.generate(input_features, max_new_tokens=max_new_tokens, prompt_ids=prompt_ids) model.generate(input_features, max_new_tokens=1, prompt_ids=prompt_ids) def test_longform_generate_single_batch(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = WhisperForConditionalGeneration(config).eval().to(torch_device) input_features = input_dict["input_features"] long_input_features = torch.cat([input_features.repeat(1, 1, 4), input_features[:, :, :10]], dim=-1) long_input_features = long_input_features[:1] vocab_size = model.config.vocab_size batch_size = 1 num_timestamp_tokens = 20 max_length = 16 logits_processor = [ DummyTimestampLogitProcessor( vocab_size - num_timestamp_tokens, vocab_size, batch_size=batch_size, max_length=max_length, min_space=4, ) ] model.generation_config.max_length = max_length with self.assertRaises(ValueError): _ = model.generate(long_input_features, logits_processor=logits_processor, return_timestamps=False) with self.assertRaises(ValueError): _ = model.generate(long_input_features, logits_processor=logits_processor) timestamp_begin = vocab_size - num_timestamp_tokens model.generation_config.no_timestamps_token_id = timestamp_begin - 1 model.generation_config.eos_token_id = None model.generation_config._detect_timestamp_from_logprob = False model.generation_config.max_initial_timestamp_index = 0 outputs = model.generate(long_input_features, logits_processor=logits_processor, return_segments=True) segments = outputs["segments"][0] for i, segment in enumerate(segments): assert segment["start"] <= segment["end"], "start has to be smaller equal end" assert ( segment["tokens"][0] == model.generation_config.decoder_start_token_id or segment["tokens"][0] >= timestamp_begin ), "First segment token should be a timestamp token" assert any( s > timestamp_begin for s in segment["tokens"][1:] ), f"At least one segment token should be a timestamp token, but not first., {segment['tokens']}" assert ( segment["tokens"].shape[-1] <= max_length ), "make sure that no segment is larger than max generation length" def test_longform_generate_multi_batch(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = WhisperForConditionalGeneration(config).eval().to(torch_device) input_features = input_dict["input_features"].to(torch_device) long_input_features = torch.cat([input_features.repeat(1, 1, 4), input_features[:, :, :10]], dim=-1) long_input_features[:1, :, :200] input_features_2 = long_input_features[1:] attention_mask = torch.ones( (2, long_input_features.shape[-1]), dtype=input_features.dtype, device=input_features.device ) attention_mask[0, 200:] = 0 vocab_size = model.config.vocab_size batch_size = 1 num_timestamp_tokens = 20 max_length = 16 timestamp_begin = vocab_size 
- num_timestamp_tokens model.generation_config.no_timestamps_token_id = timestamp_begin - 1 model.generation_config.eos_token_id = None model.generation_config._detect_timestamp_from_logprob = False model.generation_config.max_initial_timestamp_index = 0 logits_processor = [ DummyTimestampLogitProcessor( vocab_size - num_timestamp_tokens, vocab_size, batch_size=batch_size, max_length=max_length, min_space=4, seed=1, ) ] outputs_2 = model.generate(input_features_2, logits_processor=logits_processor, return_segments=True) tokens_2 = outputs_2["sequences"][0] segments_2 = outputs_2["segments"][0] batch_size = 2 logits_processor = [ DummyTimestampLogitProcessor( vocab_size - num_timestamp_tokens, vocab_size, batch_size=batch_size, max_length=max_length, min_space=4, seed=0, ) ] outputs = model.generate( long_input_features, attention_mask=attention_mask, logits_processor=logits_processor, return_segments=True ) tokens = outputs["sequences"][1] segments = outputs["segments"][1] assert tokens_2.tolist() == tokens.tolist() for seg1, seg2 in zip(segments_2, segments): assert seg1["start"] == seg2["start"] assert seg1["end"] == seg2["end"] assert seg1["tokens"].tolist() == seg2["tokens"].tolist() @require_torch @require_torchaudio class WhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_tiny_logits_librispeech(self): torch_device = "cpu" set_seed(0) model = WhisperModel.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) EXPECTED_LOGITS = torch.tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) EXPECTED_GENERATION = torch.tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) head_logits = logits[0] @ model.decoder.embed_tokens.weight.T self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-small.en") model.to(torch_device) input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="pt").input_features.to(torch_device) logits = model( input_features, decoder_input_ids=torch.tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, 
use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T EXPECTED_LOGITS = torch.tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="pt" ) input_features = processed_inputs.input_features.to(torch_device) decoder_input_ids = processed_inputs.labels.to(torch_device) logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T EXPECTED_LOGITS = torch.tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_tiny_en_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation_multilingual(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features generated_ids = model.generate(input_features, max_length=20, task="translate") EXPECTED_LOGITS = torch.tensor( [ [50258, 50259, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404], [50258, 50259, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257], [50258, 50259, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904], [50258, 50259, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439] ] ) self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=20).to("cpu") EXPECTED_LOGITS = torch.tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=448, return_timestamps=True).to("cpu") EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. 
He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." ), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_token_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generate_outputs = model.generate( input_features, max_length=448, return_timestamps=True, return_token_timestamps=True ) self.assertEqual(generate_outputs.sequences.shape, generate_outputs.token_timestamps.shape) EXPECTED_OUTPUT = torch.tensor([ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4800, 0.8200, 0.9600, 1.1200, 1.1200, 1.2200, 1.5000, 1.7200, 2.0000, 2.3400, 2.5000, 2.6600, 3.1800, 3.5600, 3.6800, 3.8000, 4.1000, 4.3000, 4.5800, 4.9400, 5.3800, 12.4200, 12.8400, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9400, 26.9400, 26.9400, 26.9400, 29.8400 ], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.5200, 0.9000, 1.1400, 1.4200, 1.5200, 1.6800, 1.6800, 1.8800, 2.1000, 2.2200, 2.6200, 3.1400, 3.5800, 3.9600, 4.4000, 17.3000, 17.3000, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7400, 26.7400, 26.7400, 26.7400, 26.7400, 26.7400, 28.0000 ], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.7600, 1.0000, 1.4200, 1.8000, 1.9400, 2.1800, 2.5200, 3.0200, 3.3200, 3.5400, 3.9400, 4.5600, 4.9200, 5.2800, 5.5600, 5.9000, 6.1600, 6.3000, 6.4800, 6.4800, 6.6400, 7.8200, 7.9600, 8.2200, 8.6000, 8.9200, 9.2200, 9.5200, 9.7200, 10.0600, 10.5400, 10.8800, 11.2600, 11.5400, 11.7400, 12.0800, 15.6800, 15.6800], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.7400, 1.0400, 1.3200, 1.6800, 2.1400, 2.4800, 2.7800, 3.0800, 3.1600, 3.4000, 3.6000, 4.0200, 4.2200, 4.8600, 5.2400, 5.7400, 6.3400, 6.6200, 6.7600, 6.7600, 6.8600, 7.2400, 7.4200, 7.6800, 7.9200, 8.4800, 8.7600, 9.2000, 9.2000, 9.4200, 15.8200, 15.8200, 29.6400, 29.6600, 29.6600, 29.6600, 29.6600, 29.7600] ]) self.assertTrue(torch.allclose(generate_outputs.token_timestamps.to("cpu"), EXPECTED_OUTPUT)) @slow def test_tiny_specaugment_librispeech(self): torch_device = "cpu" set_seed(0) model = WhisperModel.from_pretrained("openai/whisper-tiny", apply_spec_augment=True) model.train() model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = 
feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) EXPECTED_LOGITS = torch.tensor( [ 0.9362, -4.7105, 5.0879, 3.9642, 1.0013, -6.0096, 4.7285, -3.1847, -0.8648, 1.9631, 6.2653, 3.6936, 0.3575, -4.5818, 3.0564, 7.8712, 2.9951, 0.6848, 9.9497, -2.6638, 1.1571, -6.8546, -1.4333, -7.7584, 1.1200, 3.9030, 4.4655, -4.4919, -1.1703, 9.6241 ] ) self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_generate_with_prompt_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(4)[-1:] input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) output_without_prompt = model.generate(input_features) prompt_ids = processor.get_prompt_ids("Leighton") output_with_prompt = model.generate(input_features, prompt_ids=prompt_ids) expected_without_prompt = "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" expected_with_prompt = "<|startofprev|> Leighton<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Leighton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" self.assertEqual(processor.decode(output_without_prompt[0]), expected_without_prompt) self.assertEqual(processor.decode(output_with_prompt[0]), expected_with_prompt) @slow def test_generate_with_prompt_ids_and_forced_decoder_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) task = "translate" language = "de" expected_tokens = [f"<|{task}|>", f"<|{language}|>"] prompt = "test prompt" prompt_ids = processor.get_prompt_ids(prompt) output = model.generate(input_features, task=task, language=language, prompt_ids=prompt_ids) text = processor.decode(output[0]) self.assertTrue(prompt in text) self.assertTrue(all(token in text for token in expected_tokens)) @slow def test_generate_with_prompt_ids_and_no_non_prompt_forced_decoder_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) prompt = "test prompt" prompt_ids = processor.get_prompt_ids(prompt) model.generation_config.forced_decoder_ids = None model.config.forced_decoder_ids = None output = model.generate(input_features, prompt_ids=prompt_ids, return_timestamps=True) text = processor.decode(output[0]) self.assertTrue(prompt in text) @slow @require_torch_gpu def test_speculative_decoding_distil(self): torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "openai/whisper-large-v2" model = WhisperForConditionalGeneration.from_pretrained( model_id, 
torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(torch_device) processor = WhisperProcessor.from_pretrained(model_id) assistant_model_id = "distil-whisper/distil-large-v2" assistant_model = WhisperForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) assistant_model.to(torch_device) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] input_features = processor(sample["array"], return_tensors="pt").input_features.to("cuda").to(torch.float16) _ = model.generate(input_features, assistant_model=assistant_model) _ = model.generate(input_features) start_time = time.time() tokens = model.generate(input_features, assistant_model=assistant_model) total_time_assist = time.time() - start_time transcription_ass = processor.batch_decode(tokens, skip_special_tokens=True) start_time = time.time() tokens = model.generate(input_features) total_time_non_assist = time.time() - start_time transcription_non_ass = processor.batch_decode(tokens, skip_special_tokens=True) assert transcription_ass == transcription_non_ass assert transcription_ass == [ " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel." ] assert total_time_non_assist > total_time_assist, "Make sure that assistant decoding is faster" @slow @require_torch_gpu def test_speculative_decoding_non_distil(self): torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "openai/whisper-large-v2" model = WhisperForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) model.to(torch_device) processor = WhisperProcessor.from_pretrained(model_id) assistant_model_id = "openai/whisper-tiny" assistant_model = WhisperForConditionalGeneration.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True ) assistant_model.to(torch_device) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] input_features = processor(sample["array"], return_tensors="pt").input_features.to("cuda").to(torch.float16) _ = model.generate(input_features, assistant_model=assistant_model) _ = model.generate(input_features) start_time = time.time() tokens = model.generate(input_features, assistant_model=assistant_model) total_time_assist = time.time() - start_time transcription_ass = processor.batch_decode(tokens, skip_special_tokens=True) start_time = time.time() tokens = model.generate(input_features) total_time_non_assist = time.time() - start_time transcription_non_ass = processor.batch_decode(tokens, skip_special_tokens=True) assert transcription_ass == transcription_non_ass assert transcription_ass == [ " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel." ] assert total_time_non_assist > total_time_assist, "Make sure that assistant decoding is faster" @slow def test_whisper_longform_single_batch(self): EXPECTED_TEXT = [' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter\'s manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. 
He has grave doubts whether Sir Frederick Layton\'s work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell\'s pictures are a sort of up-gards and atom paintings, and Mason\'s exquisite idles are as national as a jingo poem. Mr. Birk at Foster\'s landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampoo or a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Makes the customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire, any ornaments Fred brought home from India on the mantelboard. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon\'s body trickling into the tight-lowing cloth that was the only german he wore. The cut on his chest still dripping blood. The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered his muscles into complete relaxation. Oli\'s heart and lungs worked on at a strong, measured rate. He was in reverie, sliding along the borders of consciousness. The contestants in the twenties needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, The thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I\'m here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenties, he must have drawn his gun because the intruder said quickly, but that away you\'re being a fool. out, through his silence then, and still wondering, Breon was once more asleep. Ten seconds, he asked the handler who was needing his aching muscles. A red-haired mountain of a man, with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing. Just thrust and parry, and victory to the stronger. man who entered the twenties had his own training tricks. They were appeared to be an immediate association with the death trauma, as if the two were inextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. 
had died before during the 20s and death during the last round was in some ways easier than defeat. Breathing deeply, Breon\'s softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent\'s face when the man finally recognized his error. A wave of despair rolled out from our rogue. Breon sensed it and knew the fifth point was his. Then the powerful twist that\'s rested aside, in and under the guard, because he was sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, accooing dove. He has gone, and gone for good," answered Polychrom, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with says he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn\'t work too hard, said Shaggy. He doesn\'t work at all. In fact, there\'s nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, we\'ve turned Calico. Where is my brother now, inquired Shaggy. In the metal forest. Where is that? The middle forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I\'m quite sure he didn\'t. That\'s funny, remarked Betsy thoughtfully. I don\'t believe Anne knew any magic, or she\'d have worked it before. I do not know, confess Shaggy. True, agreed Calico. Calico went to the big gong and pounded on it just as Virgato used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgato\'s discarded ruby crown and holding in his hand to scepter which reggative head so often thrown at his head.'] processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to("cuda") ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean") one_audio = np.concatenate([x["array"] for x in ds["validation"]["audio"]], dtype=np.float32) input_features = processor(one_audio, return_tensors="pt", truncation=False, padding="longest")[ "input_features" ] input_features = input_features.to(device="cuda") result = model.generate(input_features, return_timestamps=True) decoded = processor.batch_decode(result, skip_special_tokens=True) assert decoded == EXPECTED_TEXT @slow def test_whisper_longform_multi_batch(self): EXPECTED_TEXT_1 = [" Mr. Quilter's manner less interesting than his matter. 
He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-gards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Birkett Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampooer and a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. Painting he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Mix a customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing a poster or near the fire, and the ornaments Fred brought home from India on the mental board. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only unfortunately his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. a Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon's body trickling into the tight-wing cloth that was the only germany war. The cut on his chest still dripping blood. The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were, triggered his muscles into complete relaxation. Oily his heart and lungs worked on at a strong, measured rate. He was in reverie, sliding along the borders of consciousness. The contestants in the 20s needed undisturbed rest. Therefore, knights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, the thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I'm here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenty's he must have drawn his gun, because the intruder said quickly, but that away you're being a fool. Out there was silence then, and still wondering, Breon was once more asleep. Ten seconds he asked the handler who was needing his aching muscles. a red-haired mountain of a man with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing, just thrust and parry and victory to the stronger. Every man who entered the twenties had his own training tricks. There appeared to be an immediate association with the death trauma as if the two were andextricably linked into one. 
The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. Others had died before during the twenties and death during the last round was, in some ways, easier than defeat. Breeding deeply, Breon's softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. I rolled the mazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent's face when the man finally recognized his error. A wave of despair rolled out from our rogue, pre-inscented and new to fifth point was his. Then the powerful twist that's rest of the side, in and under the guard, because you were sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, a cooing dove. He has gone and gone for good, answered Polychrome, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with this, he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn't work too hard, since Shaggy. He doesn't work at all. In fact, there's nothing he can do in these dominions, as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, return Calico. Where is my brother now? choir-dshaggy, in the metal forest. Where is that? The metal forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh, no, I'm quite sure he didn't. That's funny, remarked Betsy thoughtfully. I don't believe and knew any magic, or she'd have worked it before. I do not know, confess shaggy. True, a great calico. Calico went to the big gong and pounded on it, just as Virgado used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgados discarded Ruby Crown, and holding in his hand to scepter, which Virgado had so often thrown at his head. head."] EXPECTED_TEXT_2 = [" Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-gards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Burkett Foster's landscapes smile at one much in the same way that Mr. Carker."] EXPECTED_TEXT_3 = [" possible. Nor is Mr. Quilter's manner less interesting than his matter. 
He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grieved doubts whether Sir Frederick Layton's work is really greek after all, and can discover in it but little of rocky Ithaca. Linnell's pictures are a sort of up-guards and atom paintings, and Mason's exquisite idles are as national as a jingo poem. Mr. Birk at Foster's landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. And Mr. John Collier gives his sitter a cheerful slap in the back, before he says, like a shampooer and a Turkish bath, next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. Under general principles of art, Mr. Quilter writes with equal lucidity. Painting, he tells us, is of a different quality to mathematics and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Mix a customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire. any ornaments Fred brought home from India on the mental board. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man, and remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon's body trickling into the titling cloth that was the only german he wore. The cut on his chest still dripping blood. The ache of his overstrained eyes. Even to soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered as muscles into complete relaxation. Oily his heart and lungs worked on at a strong measured rate. He was in In reverie, sliding along the borders of consciousness. The contestants in the 20s needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, the thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency clearly used to command. I'm here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenty's he must have drawn his gun, because the intruder said quickly, but that away you're being a fool. Out there was silence then, and still wondering, Breon was once more asleep. Ten seconds he asked the handler who was needing his aching muscles. a red-haired mountain of a man with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing, just thrust and parry and victory to the stronger. Every man who entered the twenties had his own training tricks. There appeared to be an immediate association with the death trauma as if the two were andextricably linked into one. 
The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. Others had died before during the twenties and death during the last round was, in some ways, easier than defeat. Breeding deeply, Breon's softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent's face when the man finally recognized his error. A wave of despair rolled out from our rogue, re-insunced it and knew the fifth point was his. Then the powerful twist that's rest of the side, in and under the guard, because you were sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, a cooing dove. He has gone and gone for good, answered Polychrome, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with this, he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has fled and disgraced, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn't work too hard, since Shaggy. He doesn't work at all. In fact, there's nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, we've turned Calico. Where is my brother now? quared shaggy. In the metal forest. Where is that? The metal forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I'm quite sure he didn't. And that's funny, remarked Betsy thoughtfully. I don't believe Anne knew any magic, or she'd have worked it before. I do not know, confess Shaggy. True, a great calico. Calico went to the big gong and pounded on it, just as we're good to have used to do, but no one answered the summons. Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the thrown wearing ruggedos discarded ruby crown and holding in his hand to septor which Ruggato had so often thrown at his head."] EXPECTED_TEXT_4 = [' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter\'s manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similes drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Layton\'s work is really Greek after all, and can discover in it but little of rocky Ithaca. Linnell\'s pictures are a sort of up-gards and atom paintings, and Mason\'s exquisite idles are as national as a jingo poem. Mr. Birk at Foster\'s landscapes smile at one much in the same way that Mr. Carker used to flash his teeth. Mr. 
John Collier gives his sitter a cheerful slap in the back, before he says, like a shampoo or a Turkish bath. Next man, it is obviously unnecessary for us to point out how luminous these criticisms are, how delicate an expression. On the general principles of art, Mr. Quilter writes with equal lucidity. he tells us is of a different quality to mathematics, and finish in art is adding more effect. As for etchings, there are two kinds, British and foreign. He laments most bitterly the divorce that has been made between decorative art and what we usually call pictures. Makes the customary appeal to the last judgment and reminds us that in the great days of art Michelangelo was the furnishing upholsterer. Near the fire, any ornaments Fred brought home from India on the mantelboard. In fact, he is quite severe on Mr. Ruskin for not recognizing that a picture should denote the frailty of man. And remarks was pleasing courtesy in Felicitis Grace that many faces are feeling. Only, unfortunately, his own work never does get good. Mr. Quilter has missed his chance, for he has failed even to make himself the Tupper of painting. By Harry Quilter M.A. A man said to the universe, Sir, I exist. Sweat-covered Breon\'s body trickling into the tight-lowing cloth that was the only german he wore. The cut on his chest still dripping blood. The ache of his overstrained eyes, even the soaring arena around him with thousands of spectators, retrovealities not worth thinking about. His instant panic was followed by a small sharp blow high on his chest. One minute, a voice said, and a time buzzer sounded. A minute is not a very large measure of time, and his body needed every fraction of it. The buzzers were triggered his muscles into complete relaxation. Oli\'s heart and lungs worked on at a strong, measured rate. He was in reverie, sliding along the borders of consciousness. The contestants in the twenties needed undisturbed rest. Therefore, nights in the dormitories were as quiet as death. Particularly so, on this last night, when only two of the little cubicles were occupied, The thousands of others standing with dark empty doors. The other voice snapped with a harsh urgency, clearly used to command. I\'m here because the matter is of utmost importance, and brand is the one I must see. Now stand aside. The twenties, he must have drawn his gun because the intruder said quickly, but that away you\'re being a fool. out, through his silence then, and still wondering, Breon was once more asleep. Ten seconds, he asked the handler who was needing his aching muscles. A red-haired mountain of a man, with an apparently inexhaustible store of energy. There could be little art in this last and final round of fencing. Just thrust and parry, and victory to the stronger. man who entered the twenties had his own training tricks. They were appeared to be an immediate association with the death trauma, as if the two were inextricably linked into one. The strength that enables someone in a trance to hold his body stiff and unsupported except at two points, the head and heels. This is physically impossible when conscious. had died before during the 20s and death during the last round was in some ways easier than defeat. Breathing deeply, Breon\'s softly spoke the auto-hypnotic phrases that triggered the process. When the buzzer sounded, he pulled his foil from his second startled grasp and ran forward. Our role looked amazed at the sudden fury of the attack, then smiled. He thought it was the last burst of energy. 
He knew how close they both were to exhaustion. Breon saw something close to panic on his opponent\'s face when the man finally recognized his error. A wave of despair rolled out from our rogue. Breon sensed it and knew the fifth point was his. Then the powerful twist that\'s rested aside, in and under the guard, because he was sleeping instead of conquering, the lovely rose princess has become a fiddle without a bow, while poor Shaggy sits there, accooing dove. He has gone, and gone for good," answered Polychrom, who had managed to squeeze into the room beside the dragon, and had witnessed the occurrences with much interest. I have remained a prisoner only because I wished to be one. And with says he stepped forward and burst the stout chains as easily as if they had been threads. The little girl had been asleep, but she heard the wraps and opened the door. The king has flooded disgrace, and your friends are asking for you. I begged Ruggadot long ago to send him away, but he would not do so. I also offered to help your brother to escape, but he would not go. He eats and sleeps very steadily, replied the new king. I hope he doesn\'t work too hard, said Shaggy. He doesn\'t work at all. In fact, there\'s nothing he can do in these dominions as well as our gnomes, whose numbers are so great that it worries us to keep them all busy. Not exactly, we\'ve turned Calico. Where is my brother now, inquired Shaggy. In the metal forest. Where is that? The middle forest is in the great domed cavern, the largest and all-ard dominions, replied Calico. Calico hesitated. However, if we look sharp, we may be able to discover one of these secret ways. Oh no, I\'m quite sure he didn\'t. That\'s funny, remarked Betsy thoughtfully. I don\'t believe Anne knew any magic, or she\'d have worked it before. I do not know, confess Shaggy. True, agreed Calico. Calico went to the big gong and pounded on it just as Virgato used to do, but no one answered the summons. 
Having returned to the Royal Cavern, Calico first pounded the gong and then sat in the throne, wearing Virgato\'s discarded ruby crown and holding in his hand to scepter which reggative head so often thrown at his head.'] processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to("cuda") ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean") one_audio = np.concatenate([x["array"] for x in ds["validation"]["audio"]], dtype=np.float32) audios = [] audios.append(one_audio[110000:]) audios.append(one_audio[:800000]) audios.append(one_audio[80000:]) audios.append(one_audio[:]) decoded_single = [] for audio in audios: inputs = processor(audio, return_tensors="pt", truncation=False) inputs = inputs.to(device="cuda") result = model.generate(**inputs, return_timestamps=True) decoded_single.append(processor.batch_decode(result, skip_special_tokens=True)) inputs = processor( audios, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True ) inputs = inputs.to(device="cuda") result = model.generate(**inputs, return_timestamps=True) decoded_all = processor.batch_decode(result, skip_special_tokens=True) assert decoded_all[0:1] == decoded_single[0] assert decoded_all[1:2] == decoded_single[1] assert decoded_all[2:3] == decoded_single[2] assert decoded_all[3:4] == decoded_single[3] assert decoded_all[0:1] == EXPECTED_TEXT_1 assert decoded_all[1:2] == EXPECTED_TEXT_2 assert decoded_all[2:3] == EXPECTED_TEXT_3 assert decoded_all[3:4] == EXPECTED_TEXT_4 @slow def test_whisper_longform_multi_batch_hard(self): EXPECTED_TEXT = [ " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!", " Folks, I spend a lot of time right over there, night after night after night, actually. Carefully selecting for you the day's noosiest, most aerodynamic headlines, stress testing, and those topical anti-lock breaks and power steering, painstakingly stitching, leather seating so soft, it would make JD power and her associates blush to create the luxury sedan that is my nightly monologue. But sometimes, you sometimes, folks. I lurched a consciousness in the back of an abandoned school and slap myself awake with a crusty floor mat. 
Before using a mouse-bitten timing belt to strap some old plywood to a couple of discarded oil drums, then by the light of a heathen moon, render a gas tank out of an empty big gulp, fill with white claw and denatured alcohol, then light a match and let her rip and the demented one man soapbox derby of news that is my segment. Me, Guadalupe! No!", " Ladies and gentlemen, you know, I spent a lot of time right over there Raising the finest Holstein news cattle firmly yet tenderly milking the latest headlines from their jokes swollen teats Churning the daily stories into the decadent proven-style style triple cream breed that is my nightly monologue But sometimes sometimes folks I stagger home hungry after being released by the police and Root around in the neighbor's trash can for an old milk carton scrape out the blooming dairy residue into the remains of a wet cheese rod I won from a rat in a pre-donned street fight. Put it in a discarded paint can to leave it to ferment next to a trash fire then hunker down and hallucinate while eating the listeria laden demon custard of news that is my segment. You mean one of them.", " Folks, if you watch this show, you know I spend most of my time right over there carefully sorting through the day's biggest stories and selecting only the most subtle and unblemished ostrich and crocodile news leather, which I then entrust to artisan graduates of the Ichol Gregoire Ferrandi, who carefully dye them in a palette of bright zesty shades and adorn them in the finest and most topical inlay work using hand tools and double magnifying glasses, then assemble them according to now classic and elegant geometry using our signature saddles stitching. In line it with bees, wax, coated linen, finely attached a mallet, hammered strap, pearled hardware, and close-shit to create for you the one-of-a-kind hoke couture, Erme's Birkin bag that is my monologue. But sometimes, sometimes folks, sometimes. Sometimes I wake up in the last car of an abandoned roller coaster at Coney Island where I'm I'm hiding from the triads. I have some engine lubricants out of a safe way bag and stagger down the shore to tear the sail off a beach schooner. Then I rip the coaxial cable out of an RV and elderly couple from Utah, Hank, and Mabel lovely folks. And use it to stitch the sail into a loose pouch like a rock sack. And I stow away in the back of a garbage truck to the junkyard where I pick through to the debris for only the broken toys that make me the saddest until I have loaded for you. The Hobo Fugitives bug out, bindle of news that is my segment. Me one!", " You know, folks, I spent a lot of time crafting for you a bespoke playlist of the day's biggest stories right over there. Meticulously selecting the most topical chakra affirming scented candles, and using Feng Shui to perfectly align the joke energy in the exclusive boutique yoga retreat that is my monologue. But sometimes just sometimes I go to the dumpster behind the waffle house at three in the morning, take off my shirt, cover myself, and used fry oil, wrap my hands with some double-duct tape by stole from the broken car window. Pound a six-pack of blueberry hard-seltzer and a sack of pills I stole from a parked ambulance. Then arm wrestle a raccoon in the back alley vision quest of news that is my segment. Meanwhile!", " You know, folks, I spend most of my time right over there. Mining the day's biggest, most important stories, collecting the finest, most topical iron or hand hammering it into joke panels. 
Then I craft sheets of bronze and blazing with patterns that tell an epic tale of conquest and glory. Then, using the Germanic tradition press-black process, I place thin sheets of foil against the scenes and by hammering or otherwise applying pressure from the back, I project these scenes into a pair of cheat cards in a faceplate and, finally, using fluted strips of white alloyed molding, I divide the designs into framed panels and hold it all together using bronze rivets to create the beautiful and intimidating, Anglo-Saxon battle helm that is my nightly monologue. Sometimes, sometimes folks. Sometimes, just sometimes, I come into my sense as fully naked on the deck of a pirate besieged melee container ship that picked me up floating on the detached door of a portapotty in the Indian Ocean. Then after a sunstroke-induced realization of the crew of this ship plans to sell me an exchange for a bag of oranges to fight off scurvy, I lead a mutiny using only a PVC pipe at a pool chain that accepting my new role as Captain and declaring myself king of the windarc seas. I grab a dirty mop bucket covered in barnacles and adorn it with the teeth of the vanquished to create the sopping wet pirate crown of news that is my segment. Meanwhile!", " Folks, if you watch this show, you know I spend most of my time right over there carefully blending for you the day's Newsiest most topical flower eggs milk and butter and Stranding into a fine batter to make delicate and informative comedy pancakes Then I glaze them in the juice and zest of the most relevant midnight Valencia oranges and douse it all and a fine Dela main de voyage cognac Before prom baying and basting them tables. I deserve for you the James Beard award worthy crepe suzzette That is my nightly monologue, but sometimes just sometimes folks. I wake up in the baggage hold of Greyhound bus. It's being hoisted by the scrap yard claw toward the burn pit. Escape to a nearby abandoned price chopper where I scrounge for old bread scraps and busted open bags of starfruit candies and expired eggs. Chuck it all on a dirty hubcap and slap it over a tire fire before using the legs of a strain, pair of sweatpants and as oven mitts to extract and serve the demented transience poundcake of news that is my segment. Me, Guadalupe!", " Folks, if you watched the show and I hope you do, I spent a lot of time right over there. Tiredlessly studying the lineage of the days most important thoroughbred stories and whole-stiner headlines, working with the best trainers, money can buy to rear their comedy offspring with a hand that is stern yet gentle into the triple crown winning equine specimen. That is my nightly monologue, but sometimes, sometimes, folks, I break into an unincorporated veterinary genetics lab and grab whatever test tubes I can find and then under a grow light I got from a discarded chia pet. I mixed the pilfered DNA of a horse and whatever was in a tube labeled Keith Colan extra. Slurrying the concoction with caffeine pills and a microwave red bull, I screamed, sang a prayer to Janice, initiator of human life and God of transformation as a half horse, half man, freak. Seizes to life before me and the hideous collection of loose animal parts and corrupted man tissue that is my segment. 
Meanwhile!", ] processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to("cuda") ds = load_dataset("distil-whisper/meanwhile", "default")["test"] ds = ds.cast_column("audio", Audio(sampling_rate=16000)) num_samples = 8 audio = ds[:num_samples]["audio"] audios = [x["array"] for x in audio] decoded_single = [] for audio in audios: inputs = processor(audio, return_tensors="pt", truncation=False, sampling_rate=16_000) inputs = inputs.to(device="cuda") result = model.generate(**inputs, return_timestamps=True) decoded_single += processor.batch_decode(result, skip_special_tokens=True) inputs = processor( audios, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True ) inputs = inputs.to(device="cuda") result = model.generate(**inputs, return_timestamps=True) decoded_all = processor.batch_decode(result, skip_special_tokens=True) for i in range(num_samples): assert decoded_all[i] == decoded_single[i] assert decoded_all[i] == EXPECTED_TEXT[i] def prepare_whisper_encoder_inputs_dict(config, input_features, head_mask=None): if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) return {"input_features": input_features, "head_mask": head_mask} @require_torch class WhisperEncoderModelTester: def __init__( self, parent, batch_size=2, seq_length=60, is_training=True, use_labels=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, num_mel_bins=80, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, classifier_proj_size=4, num_labels=2, is_encoder_decoder=False, is_decoder=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens self.classifier_proj_size = classifier_proj_size self.num_labels = num_labels self.is_encoder_decoder = is_encoder_decoder self.is_decoder = is_decoder def get_config(self): return WhisperConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, classifier_proj_size=self.classifier_proj_size, num_labels=self.num_labels, is_encoder_decoder=self.is_encoder_decoder, is_decoder=self.is_decoder, ) def prepare_config_and_inputs(self): 
input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length]) config = self.get_config() inputs_dict = prepare_whisper_encoder_inputs_dict( config, input_features=input_features, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperForAudioClassification(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] last_hidden_state = model(input_features).logits self.parent.assertTrue(last_hidden_state.shape, (13, 2)) @require_torch class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (WhisperForAudioClassification,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = WhisperEncoderModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "head_mask", "encoder_outputs"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_disk_offload_bin(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. 
Skip for now.") def test_model_parallelism(self): pass def test_inputs_embeds(self): pass def test_encoder_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): outputs = model(**inputs)[0] input_ids = inputs["input_features"] del inputs["input_features"] encoder = model.encoder with torch.no_grad(): inputs["encoder_outputs"] = encoder(input_ids) outputs_embeds = model(**inputs)[0] self.assertTrue((outputs_embeds == outputs).all()) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Conv1d)) model.set_input_embeddings(torch.nn.Conv1d(10, 10, 3)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, torch.nn.Conv1d)) def test_resize_tokens_embeddings(self): pass @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): 
return config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) pt_model = model_class(config).eval() pt_model.config.use_cache = False fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_inputs = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) pt_model.tie_weights() pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) class WhisperStandaloneDecoderModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = torch.tensor( self.batch_size * [[self.decoder_start_token_id, 3, 3, 7, 2]], device=torch_device ) config = self.get_config() config.is_encoder_decoder = False inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, 
input_features=input_features, decoder_input_ids=decoder_input_ids, ) inputs_dict.pop("input_features") inputs_dict.pop("head_mask") inputs_dict.pop("decoder_head_mask") inputs_dict.pop("cross_attn_head_mask") inputs_dict["attention_mask"] = inputs_dict.pop("decoder_attention_mask") inputs_dict["input_ids"] = inputs_dict.pop("decoder_input_ids") return config, inputs_dict @property def encoder_seq_length(self): return 5 @property def seq_length(self): return 5 def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["input_ids"][:, -1] = self.pad_token_id return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, input_features = self.prepare_config_and_inputs() input_ids = input_features["input_ids"] encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) return (config, input_ids, encoder_hidden_states) def create_and_check_decoder_model_past(self, config, input_ids): config.use_cache = True model = WhisperDecoder(config=config).to(torch_device).eval() outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past(self, config, input_ids): model = WhisperDecoder(config=config).to(torch_device).eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = 
random_other_next_tokens next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) @require_torch class WhisperStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (WhisperDecoder, WhisperForCausalLM) if is_torch_available() else () all_generative_model_classes = (WhisperForCausalLM,) if is_torch_available() else () fx_compatible = False test_pruning = False is_encoder_decoder = False test_missing_keys = False def setUp(self): self.model_tester = WhisperStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config, inputs_dict = config_and_inputs self.model_tester.create_and_check_decoder_model_past(config=config, input_ids=inputs_dict["input_ids"]) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config, inputs_dict = config_and_inputs self.model_tester.create_and_check_decoder_model_attention_mask_past( config=config, input_ids=inputs_dict["input_ids"] ) @unittest.skip("Generate needs input ids") def test_generate_without_input_ids(self): pass @unittest.skip("Decoder can't keep attention grads") def test_retain_grad_hidden_states_attentions(self): return @unittest.skip("The model doesn't support fast init from base") def test_save_load_fast_init_from_base(self): pass @unittest.skip("The model doesn't support left padding") def test_left_padding_compatibility(self): pass
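# A minimal, self-contained sketch (not part of the original test suite) of how the standalone
# decoder exercised by WhisperStandaloneDecoderModelTest above can be driven as a causal language
# model. The tiny hyper-parameters are illustrative assumptions chosen to mirror
# WhisperStandaloneDecoderModelTester, not values taken from a released checkpoint.
import torch

from transformers import WhisperConfig, WhisperForCausalLM

tiny_config = WhisperConfig(
    vocab_size=200,
    d_model=16,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=16,
    decoder_ffn_dim=16,
    max_target_positions=40,
    is_encoder_decoder=False,
)
decoder_only = WhisperForCausalLM(tiny_config).eval()

# decoder_start_token_id (85 in the tester above) followed by a few arbitrary token ids
input_ids = torch.tensor([[85, 3, 3, 7, 2]])
with torch.no_grad():
    logits = decoder_only(input_ids=input_ids).logits
# one vocabulary distribution per input position
assert logits.shape == (1, input_ids.shape[-1], tiny_config.vocab_size)
# This decoder-only path is what lets WhisperForCausalLM act as a lightweight model without the
# audio encoder (e.g. as an assistant model for speculative decoding).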
# Copyright 2022 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil import tempfile import unittest import pytest from transformers import WhisperTokenizer, is_speech_available from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio from .test_feature_extraction_whisper import floats_list if is_speech_available(): from transformers import WhisperFeatureExtractor, WhisperProcessor TRANSCRIBE = 50358 NOTIMESTAMPS = 50362 @require_torch @require_torchaudio @require_sentencepiece class WhisperProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "openai/whisper-small.en" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return WhisperTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return WhisperFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = WhisperProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = WhisperProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] 
decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_get_decoder_prompt_ids(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) forced_decoder_ids = processor.get_decoder_prompt_ids(task="transcribe", no_timestamps=True) self.assertIsInstance(forced_decoder_ids, list) for ids in forced_decoder_ids: self.assertIsInstance(ids, (list, tuple)) expected_ids = [TRANSCRIBE, NOTIMESTAMPS] self.assertListEqual([ids[-1] for ids in forced_decoder_ids], expected_ids) def test_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("Mr. Quilter") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 1770, 13, 2264, 346, 353]) self.assertEqual(decoded_prompt, "<|startofprev|> Mr. Quilter") def test_empty_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 220]) self.assertEqual(decoded_prompt, "<|startofprev|> ") def test_get_prompt_ids_with_special_tokens(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) def _test_prompt_error_raised_helper(prompt, special_token): with pytest.raises(ValueError) as excinfo: processor.get_prompt_ids(prompt) expected = f"Encountered text in the prompt corresponding to disallowed special token: {special_token}." self.assertEqual(expected, str(excinfo.value)) _test_prompt_error_raised_helper("<|startofprev|> test", "<|startofprev|>") _test_prompt_error_raised_helper("test <|notimestamps|>", "<|notimestamps|>") _test_prompt_error_raised_helper("test <|zh|> test <|transcribe|>", "<|zh|>")
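# A minimal usage sketch (not part of the original tests) of the round-trip that
# WhisperProcessorTest above verifies: raw audio -> log-mel input features through the feature
# extractor, and prompt text -> token ids through the tokenizer. The random-noise "speech" is a
# stand-in assumption; the checkpoint name mirrors the one used in setUp.
import numpy as np

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small.en")

fake_speech = np.random.randn(16_000).astype(np.float32)  # one second of noise at 16 kHz
features = processor(fake_speech, sampling_rate=16_000, return_tensors="np")
# the feature extractor pads to a 30 s window: 80 mel bins x 3000 frames
assert features["input_features"].shape == (1, 80, 3000)

# prompt ids are prefixed with <|startofprev|>, exactly as asserted in test_get_prompt_ids above
prompt_ids = processor.get_prompt_ids("Mr. Quilter")
assert processor.tokenizer.decode(prompt_ids) == "<|startofprev|> Mr. Quilter"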
2022 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license test converttokentoid and convertidtotoken token where tokenid 14436 self assertequalself gettokenizer converttokentoidtoken tokenid self assertequalself gettokenizer convertidtotokentokenid token def testgetvocabself vocabkeys listself gettokenizer getvocab keys self assertequalvocabkeys0 self assertequalvocabkeys1 self assertequalvocabkeys1 30 00 self assertequallenvocabkeys 51865 def testvocabsizeself self assertequalself gettokenizer vocabsize 50258 def testfulltokenizerself tokenizer whispertokenizer frompretrainedself tmpdirname tokens tokenizer tokenizethis is a test self assertlistequaltokens this is a test self assertlistequal tokenizer converttokenstoidstokens 5723 307 257 220 31636 tokens tokenizer tokenizei was born in 92000 and this is fals self assertlistequal tokens i was born in 9 2000 and this is fals fmt skip fmt skip ids tokenizer converttokenstoidstokens self assertlistequalids 40 390 4232 294 1722 25743 11 293 220 11176 307 16720 526 13 backtokens tokenizer convertidstotokensids self assertlistequal backtokens i was born in 9 2000 and this is fals fmt skip fmt skip def testtokenizerslowstorefullsignatureself pass def testtokenizerfaststorefullsignatureself pass def testspecialtokensinitializationself whisper relies on specific additional special tokens so we skip this general test in particular this test loads fast tokenizer from slow tokenizer and the conversion uses prefixtokens where we reference additional special tokens by specific indices hence overriding the list with less tokens leads to out of index error pass slow def testtokenizerintegrationself expectedencoding inputids 50257 50362 41762 364 357 36234 1900 355 12972 13165 354 12 35636 364 290 12972 13165 354 12 5310 13363 12 4835 8 3769 2276 12 29983 45619 357 13246 51 11 402 11571 12 17 11 5564 13246 38586 11 16276 44 11 4307 346 33 861 11 16276 7934 23029 329 12068 15417 28491 357 32572 52 8 290 12068 15417 16588 357 32572 38 8 351 625 3933 10 2181 13363 4981 287 1802 10 8950 290 2769 48817 1799 1022 449 897 11 9485 15884 354 290 309 22854 37535 13 50256 50257 50362 13246 51 318 3562 284 662 12 27432 2769 8406 4154 282 24612 422 9642 9608 276 2420 416 26913 21143 319 1111 1364 290 826 4732 287 477 11685 13 50256 50257 50362 464 2068 7586 21831 18045 625 262 16931 3290 13 50256 attentionmask 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 fmt skip self tokenizerintegrationtestutil expectedencodingexpectedencoding modelnameopenaiwhispertiny en paddingfalse def testoutputoffsetsself tokenizer self gettokenizer previoussequence 51492 406 3163 1953 466 13 51612 51612 self assertequal tokenizer decodeprevioussequence outputoffsetstrue text not worth thinking about offsets text not worth thinking about timestamp 22 56 24 96 merge when the previous sequence is a 
suffix of the next sequence nextsequences1 50364 295 6177 3391 11 19817 3337 507 307 406 3163 1953 466 13 50614 50614 2812 9836 14783 390 6263 538 257 1359 11 8199 6327 1090 322 702 7443 13 50834 50257 fmt skip self assertequal tokenizer decodenextsequences1 outputoffsetstrue text of spectators retrievality is not worth thinking about his instant panic was followed by a small sharp blow high on his chest endoftext offsets text of spectators retrievality is not worth thinking about timestamp 0 0 5 0 text his instant panic was followed by a small sharp blow high on his chest timestamp 5 0 9 4 def testfindlongestcommonsubsequenceself previoussequence 1 2 3 nextsequence 2 3 4 5 merge findlongestcommonsequenceprevioussequence nextsequence self assertequalmerge 1 2 3 4 5 now previous is larger than next we merge what we can and remove the extra right side of the left sequence previoussequence 1 2 3 4 5 6 7 nextsequence 2 3 4 5 merge findlongestcommonsequenceprevioussequence nextsequence self assertequalmerge 1 2 3 4 5 nothing in common previoussequence 1 2 3 nextsequence 4 5 6 merge findlongestcommonsequenceprevioussequence nextsequence self assertequalmerge 1 2 3 4 5 6 some errors in the overlap we take from previous on the left from the next on the right of the overlap previoussequence 1 2 3 4 99 nextsequence 2 98 4 5 6 merge findlongestcommonsequenceprevioussequence nextsequence self assertequalmerge 1 2 3 4 5 6 we take from previous on the left from the next on the right of the overlap previoussequence 1 2 99 4 5 nextsequence 2 3 4 98 6 merge findlongestcommonsequenceprevioussequence nextsequence self assertequalmerge 1 2 99 4 98 6 this works on 3 sequences seq1 1 2 3 seq2 2 3 4 seq3 3 4 5 merge findlongestcommonsequenceseq1 seq2 seq3 self assertequalmerge 1 2 3 4 5 this works on 3 sequences with errors seq1 1 2 3 98 5 seq2 2 99 4 5 6 7 seq3 4 97 6 7 8 merge findlongestcommonsequenceseq1 seq2 seq3 self assertequalmerge 1 2 3 4 5 6 7 8 def testskipspecialtokensskipspromptidsself tokenizer self gettokenizer rusttokenizer self getrusttokenizer fmt off encodedinput 50361 2221 13 2326 388 391 50258 50259 50359 50363 1282 264 2674 9156 295 1523 11 2221 13 2326 388 391 13657 365 2681 21296 17711 13 50257 fmt on expectedwithspecialtokens startofprev mr quilterstartoftranscriptentranscribenotimestamps on the general principles of art mr quilter writes with equal lucidity endoftext expectedwithoutspecialtokens on the general principles of art mr quilter writes with equal lucidity self assertequaltokenizer decodeencodedinput skipspecialtokensfalse expectedwithspecialtokens self assertequaltokenizer decodeencodedinput skipspecialtokenstrue expectedwithoutspecialtokens self assertequalrusttokenizer decodeencodedinput skipspecialtokensfalse expectedwithspecialtokens self assertequal rusttokenizer decodeencodedinput skipspecialtokenstrue expectedwithoutspecialtokens def testskipspecialtokenswithtimestampsself tokenizer self gettokenizer rusttokenizer self getrusttokenizer fmt off encodedinput 50258 50363 50364 634 575 12525 22618 1968 6144 35617 20084 1756 311 589 307 534 10281 934 439 293 50676 50676 393 4411 294 309 457 707 295 33301 286 392 6628 13 50836 50257 fmt on expectedwithspecialtokens startoftranscriptnotimestamps0 00 he has grave doubts whether sir frederick layton s work is really greek after all and6 246 24 can discover in it but little of rocky ithaca 9 44endoftext expectedwithoutspecialtokens 0 00 he has grave doubts whether sir frederick layton s work is really greek after all and6 246 24 
can discover in it but little of rocky ithaca 9 44 self assertequal tokenizer decodeencodedinput decodewithtimestampstrue skipspecialtokensfalse expectedwithspecialtokens self assertequal tokenizer decodeencodedinput decodewithtimestampstrue skipspecialtokenstrue expectedwithoutspecialtokens self assertequal rusttokenizer decodeencodedinput decodewithtimestampstrue skipspecialtokensfalse expectedwithspecialtokens self assertequal rusttokenizer decodeencodedinput decodewithtimestampstrue skipspecialtokenstrue expectedwithoutspecialtokens def testfasttokenizergetpromptidsself tokenizer self gettokenizer rusttokenizer self getrusttokenizer prompt this is test prompt text tokenizerpromptids tokenizer getpromptidsprompt fasttokenizerpromptids rusttokenizer getpromptidsprompt self assertlistequaltokenizerpromptids tolist fasttokenizerpromptids tolist def testcombinetokensintowordsself tokenizer self gettokenizer rusttokenizer self getrusttokenizer whatever whatever said someone clever encodedinput 1363 7969 503 1363 7969 1 848 1580 11 13494 7323 expectedwords whatever whatever said someone clever expectedtokens 1363 7969 503 1363 7969 1 848 1580 11 13494 7323 expectedindices 0 1 2 3 4 5 6 7 8 9 10 output combinetokensintowordstokenizer encodedinput self assertequalexpectedwords output0 self assertequalexpectedtokens output1 self assertequalexpectedindices output2 outputrust combinetokensintowordsrusttokenizer encodedinput self assertequalexpectedwords outputrust0 self assertequalexpectedtokens outputrust1 self assertequalexpectedindices outputrust2 def testbasicnormalizerself tokenizer self gettokenizer rusttokenizer self getrusttokenizer inputstr hola gey expectedoutputnormalize hola gey expectedoutputdiacritics hola guey tokenizer tests encodedinput tokenizerinputstr inputids decodedoutput tokenizer decodeencodedinput skipspecialtokenstrue basicnormalizefalse self assertequaldecodedoutput inputstr decodedoutputnormalize tokenizer decodeencodedinput skipspecialtokenstrue basicnormalizetrue self assertequaldecodedoutputnormalize expectedoutputnormalize decodedoutputdiacritics tokenizer decode encodedinput skipspecialtokenstrue basicnormalizetrue removediacriticstrue self assertequaldecodedoutputdiacritics expectedoutputdiacritics fast tokenizer tests encodedinput rusttokenizerinputstr inputids decodedoutput rusttokenizer decodeencodedinput skipspecialtokenstrue basicnormalizefalse self assertequaldecodedoutput inputstr decodedoutputnormalize rusttokenizer decodeencodedinput skipspecialtokenstrue basicnormalizetrue self assertequaldecodedoutputnormalize expectedoutputnormalize decodedoutputdiacritics rusttokenizer decode encodedinput skipspecialtokenstrue basicnormalizetrue removediacriticstrue self assertequaldecodedoutputdiacritics expectedoutputdiacritics class speechtotexttokenizermultilinguialtestunittest testcase checkpointname openaiwhispersmall en classmethod def setupclasscls cls tokenizer whispertokenizer whispertokenizer frompretrainedcls checkpointname return cls def testtokenizerequivalenceself text multilingualtokenizer whispertokenizer frompretrainedopenaiwhispertiny languagekorean monolingualtokenizer whispertokenizer frompretrainedopenaiwhispertiny en monolingualtokens monolingualtokenizer encodetext addspecialtokensfalse multilingualtokens multilingualtokenizer encodetext addspecialtokensfalse assert monolingualtokenizer decodemonolingualtokens text assert multilingualtokenizer decodemultilingualtokens text assert lenmonolingualtokens lenmultilingualtokens fmt off expectedeng 
Testing suite for the Whisper tokenizer (Apache License 2.0, 2022 The HuggingFace Team). The slow/fast tokenizer tests cover token/id conversion, the vocab, full tokenization, offset output, merging overlapping chunks with _find_longest_common_sequence, skipping prompt and timestamp special tokens, get_prompt_ids parity between the slow and fast tokenizers, _combine_tokens_into_words, and basic normalization; the general special-tokens-initialization test is skipped because Whisper's prefix_tokens references additional special tokens by index, so overriding that list with fewer tokens would raise an out-of-index error. The multilingual tests then check equivalence between the English-only and multilingual checkpoints, the <|startoftranscript|><|lang|><|task|><|notimestamps|> prefix and other special tokens, vocab size, decoding that ignores language codes, batch encoding with default and overridden prefix tokens, batch encode/decode round-trips, timestamp offset decoding (with and without decode_with_timestamps), and chat-template tokenization.
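A minimal usage sketch of the prefix and timestamp handling these tests exercise; it assumes access to the openai/whisper-tiny checkpoint and uses only arguments that appear in the tests below (language/task in from_pretrained, skip_special_tokens and decode_with_timestamps in decode).

from transformers import WhisperTokenizer

# Loading with language/task pins the forced prefix
# <|startoftranscript|><|es|><|transcribe|><|notimestamps|>.
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish", task="transcribe")

ids = tokenizer("Hola güey").input_ids

# Plain text back out, with the prefix and <|endoftext|> stripped.
print(tokenizer.decode(ids, skip_special_tokens=True))

# Keep special tokens (and render any timestamp tokens as <|t.tt|>) instead.
print(tokenizer.decode(ids, decode_with_timestamps=True))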
import unittest from transformers.models.whisper import WhisperTokenizer, WhisperTokenizerFast from transformers.models.whisper.tokenization_whisper import _combine_tokens_into_words, _find_longest_common_sequence from transformers.testing_utils import require_jinja, slow from ...test_tokenization_common import TokenizerTesterMixin ES_CODE = 50262 EN_CODE = 50259 END_OF_TRANSCRIPT = 50257 START_OF_TRANSCRIPT = 50258 TRANSLATE = 50358 TRANSCRIBE = 50359 NOTIMESTAMPS = 50363 class WhisperTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = WhisperTokenizer rust_tokenizer_class = WhisperTokenizerFast test_rust_tokenizer = True test_sentencepiece = False test_seq2seq = False def setUp(self): super().setUp() tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") tokenizer.pad_token_id = 50256 tokenizer.pad_token = "<|endoftext|>" tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "Where" token_id = 14436 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "!") self.assertEqual(vocab_keys[1], '"') self.assertEqual(vocab_keys[-1], "<|30.00|>") self.assertEqual(len(vocab_keys), 51865) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 50258) def test_full_tokenizer(self): tokenizer = WhisperTokenizer.from_pretrained(self.tmpdirname) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["This", "Ġis", "Ġa", "Ġ", "test"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [5723, 307, 257, 220, 31636], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, ["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġ", "this", "Ġis", "Ġfals", "é", "."], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 220, 11176, 307, 16720, 526, 13]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, ["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġ", "this", "Ġis", "Ġfals", "é", "."], ) def test_tokenizer_slow_store_full_signature(self): pass def test_tokenizer_fast_store_full_signature(self): pass def test_special_tokens_initialization(self): pass @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="openai/whisper-tiny.en", padding=False ) def test_output_offsets(self): tokenizer = self.get_tokenizer() previous_sequence = [51492, 406, 3163, 1953, 466, 13, 51612, 51612] self.assertEqual( tokenizer.decode(previous_sequence, output_offsets=True), { "text": " not worth thinking about.", "offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}], }, ) next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] self.assertEqual( tokenizer.decode(next_sequences_1, output_offsets=True), { "text": ( " of spectators, retrievality is not worth thinking about. His instant panic was followed by a" " small, sharp blow high on his chest.<|endoftext|>" ), "offsets": [ {"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (5.0, 9.4), }, ], }, ) def test_find_longest_common_subsequence(self): previous_sequence = [1, 2, 3] next_sequence = [2, 3, 4, 5] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5]) previous_sequence = [1, 2, 3, 4, 5, 6, 7] next_sequence = [2, 3, 4, 5] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5]) previous_sequence = [1, 2, 3] next_sequence = [4, 5, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6]) previous_sequence = [1, 2, 3, 4, 99] next_sequence = [2, 98, 4, 5, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6]) previous_sequence = [1, 2, 99, 4, 5] next_sequence = [2, 3, 4, 98, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 99, 4, 98, 6]) seq1 = [1, 2, 3] seq2 = [2, 3, 4] seq3 = [3, 4, 5] merge = _find_longest_common_sequence([seq1, seq2, seq3]) self.assertEqual(merge, [1, 2, 3, 4, 5]) seq1 = [1, 2, 3, 98, 5] seq2 = [2, 99, 4, 5, 6, 7] seq3 = [4, 97, 6, 7, 8] merge = _find_longest_common_sequence([seq1, seq2, seq3]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6, 7, 8]) def test_skip_special_tokens_skips_prompt_ids(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() encoded_input = [ 50361, 2221, 13, 2326, 388, 391, 50258, 50259, 50359, 50363, 1282, 264, 2674, 9156, 295, 1523, 11, 2221, 13, 2326, 388, 391, 13657, 365, 2681, 21296, 17711, 13, 50257, ] expected_with_special_tokens = "<|startofprev|> Mr. Quilter<|startoftranscript|><|en|><|transcribe|><|notimestamps|> On the general principles of art, Mr. Quilter writes with equal lucidity.<|endoftext|>" expected_without_special_tokens = " On the general principles of art, Mr. Quilter writes with equal lucidity." 
self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens) self.assertEqual(rust_tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) self.assertEqual( rust_tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens ) def test_skip_special_tokens_with_timestamps(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() encoded_input = [ 50258, 50363, 50364, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 293, 50676, 50676, 393, 4411, 294, 309, 457, 707, 295, 33301, 286, 392, 6628, 13, 50836, 50257, ] expected_with_special_tokens = "<|startoftranscript|><|notimestamps|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|><|endoftext|>" expected_without_special_tokens = "<|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|>" self.assertEqual( tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=False), expected_with_special_tokens, ) self.assertEqual( tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=True), expected_without_special_tokens, ) self.assertEqual( rust_tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=False), expected_with_special_tokens, ) self.assertEqual( rust_tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=True), expected_without_special_tokens, ) def test_fast_tokenizer_get_prompt_ids(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() prompt = "This is test prompt text." tokenizer_prompt_ids = tokenizer.get_prompt_ids(prompt) fast_tokenizer_prompt_ids = rust_tokenizer.get_prompt_ids(prompt) self.assertListEqual(tokenizer_prompt_ids.tolist(), fast_tokenizer_prompt_ids.tolist()) def test_combine_tokens_into_words(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() encoded_input = [1363, 7969, 503, 1363, 7969, 1, 848, 1580, 11, 13494, 7323] expected_words = ["whatever", ' "whatever"', " said", " someone,", " clever!?"] expected_tokens = [[1363, 7969], [503, 1363, 7969, 1], [848], [1580, 11], [13494, 7323]] expected_indices = [[0, 1], [2, 3, 4, 5], [6], [7, 8], [9, 10]] output = _combine_tokens_into_words(tokenizer, encoded_input) self.assertEqual(expected_words, output[0]) self.assertEqual(expected_tokens, output[1]) self.assertEqual(expected_indices, output[2]) output_rust = _combine_tokens_into_words(rust_tokenizer, encoded_input) self.assertEqual(expected_words, output_rust[0]) self.assertEqual(expected_tokens, output_rust[1]) self.assertEqual(expected_indices, output_rust[2]) def test_basic_normalizer(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() input_str = "Hola güey!" 
expected_output_normalize = "hola güey " expected_output_diacritics = "hola guey " encoded_input = tokenizer(input_str).input_ids decoded_output = tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=False) self.assertEqual(decoded_output, input_str) decoded_output_normalize = tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=True) self.assertEqual(decoded_output_normalize, expected_output_normalize) decoded_output_diacritics = tokenizer.decode( encoded_input, skip_special_tokens=True, basic_normalize=True, remove_diacritics=True ) self.assertEqual(decoded_output_diacritics, expected_output_diacritics) encoded_input = rust_tokenizer(input_str).input_ids decoded_output = rust_tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=False) self.assertEqual(decoded_output, input_str) decoded_output_normalize = rust_tokenizer.decode(encoded_input, skip_special_tokens=True, basic_normalize=True) self.assertEqual(decoded_output_normalize, expected_output_normalize) decoded_output_diacritics = rust_tokenizer.decode( encoded_input, skip_special_tokens=True, basic_normalize=True, remove_diacritics=True ) self.assertEqual(decoded_output_diacritics, expected_output_diacritics) class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase): checkpoint_name = "openai/whisper-small.en" @classmethod def setUpClass(cls): cls.tokenizer: WhisperTokenizer = WhisperTokenizer.from_pretrained(cls.checkpoint_name) return cls def test_tokenizer_equivalence(self): text = "다람쥐 헌 쳇바퀴에 타고파" multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="korean") monolingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny.en") monolingual_tokens = monolingual_tokenizer.encode(text, add_special_tokens=False) multilingual_tokens = multilingual_tokenizer.encode(text, add_special_tokens=False) assert monolingual_tokenizer.decode(monolingual_tokens) == text assert multilingual_tokenizer.decode(multilingual_tokens) == text assert len(monolingual_tokens) > len(multilingual_tokens) EXPECTED_ENG = [ 46695, 97, 167, 252, 234, 168, 98, 238, 220, 169, 245, 234, 23821, 111, 229, 167, 108, 242, 169, 222, 112, 168, 245, 238, 220, 169, 225, 222, 166, 111, 254, 169, 234, 234 ] EXPECTED_MULTI = [ 9835, 22855, 168, 98, 238, 13431, 234, 43517, 229, 47053, 169, 222, 19086, 19840, 1313, 17974 ] self.assertListEqual(monolingual_tokens, EXPECTED_ENG) self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) def test_tokenizer_special(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="english", task="transcribe" ) text = "Hey! How are you feeling? J'ai l'impression que 郷さん est prêt" multilingual_tokens = multilingual_tokenizer.encode(text) EXPECTED_MULTI = [ START_OF_TRANSCRIPT, EN_CODE, TRANSCRIBE, NOTIMESTAMPS, 7057, 0, 1012, 366, 291, 2633, 30, 508, 6, 1301, 287, 6, 36107, 631, 220, 11178, 115, 15567, 871, 44393, END_OF_TRANSCRIPT ] EXPECTED_SPECIAL_TEXT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hey! How are you feeling? 
" "J'ai l'impression que 郷さん est prêt<|endoftext|>" ) self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) special_transcript = multilingual_tokenizer.decode(multilingual_tokens, skip_special_tokens=False) self.assertEqual(special_transcript, EXPECTED_SPECIAL_TEXT) transcript = multilingual_tokenizer.decode(multilingual_tokens, skip_special_tokens=True) self.assertEqual(transcript, text) def test_vocab_size(self): self.assertEqual(self.tokenizer.vocab_size, 50257) def test_tokenizer_decode_ignores_language_codes(self): self.assertIn(ES_CODE, self.tokenizer.all_special_ids) generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_spanish) self.assertNotIn(self.tokenizer.eos_token, result) def test_batch_encoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="spanish", task="translate" ) batch = ["El gato ", "El gato se sentó"] batch_output = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids EXPECTED_MULTI = [ [START_OF_TRANSCRIPT, ES_CODE, TRANSLATE, NOTIMESTAMPS, 17356, 290, 2513, 220, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT], [START_OF_TRANSCRIPT, ES_CODE, TRANSLATE, NOTIMESTAMPS, 17356, 290, 2513, 369, 2279, 812, END_OF_TRANSCRIPT] ] self.assertListEqual(batch_output, EXPECTED_MULTI) def test_set_prefix_tokens(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="spanish", task="translate" ) multilingual_tokenizer.set_prefix_tokens(language="english") batch = ["the cat", "the cat sat"] batch_output = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids EXPECTED_MULTI = [ [START_OF_TRANSCRIPT, EN_CODE, TRANSLATE, NOTIMESTAMPS, 3322, 3857, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT], [START_OF_TRANSCRIPT, EN_CODE, TRANSLATE, NOTIMESTAMPS, 3322, 3857, 3227, END_OF_TRANSCRIPT] ] self.assertListEqual(batch_output, EXPECTED_MULTI) def test_batch_encoding_decoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish") batch = ["hola güey", "que onda"] batch_encoding = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids transcription = multilingual_tokenizer.batch_decode(batch_encoding, skip_special_tokens=True) self.assertListEqual(batch, transcription) def test_offset_decoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") INPUT_TOKENS = [ 50258, 50259, 50359, 50364, 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724, 50724, 366, 382, 4048, 382, 257, 361, 18459, 13065, 13, 2221, 13, 7145, 74, 325, 38756, 311, 29822, 7563, 412, 472, 709, 294, 264, 51122, 51122, 912, 636, 300, 2221, 13, 2741, 5767, 1143, 281, 7319, 702, 7798, 13, 400, 2221, 13, 2619, 4004, 811, 2709, 702, 51449, 51449, 50257 ] output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual( output, [ { "text": ( " Lennils, pictures are a sort of upguards and atom paintings, and Mason's exquisite idles" ), "timestamp": (0.0, 7.2), }, { "text": ( " are as national as a jingo poem. Mr. Birkut Foster's landscapes smile at one much in the" ), "timestamp": (7.2, 15.16), }, { "text": " same way that Mr. Carker used to flash his teeth. And Mr. 
John Colier gives his", "timestamp": (15.16, 21.7), }, ], ) output = multilingual_tokenizer.decode(INPUT_TOKENS, decode_with_timestamps=True) self.assertEqual( output, "<|startoftranscript|><|en|><|transcribe|><|0.00|> Lennils, pictures are a sort of upguards and atom" " paintings, and Mason's exquisite idles<|7.20|><|7.20|> are as national as a jingo poem. Mr. Birkut" " Foster's landscapes smile at one much in the<|15.16|><|15.16|> same way that Mr. Carker used to flash" " his teeth. And Mr. John Colier gives his<|21.70|><|21.70|><|endoftext|>", ) INPUT_TOKENS = [ 50364, 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724 ] output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual( output[0], { "text": " Lennils, pictures are a sort of upguards and atom paintings, and Mason's exquisite idles", "timestamp": (0.0, 7.2), }, ) INPUT_TOKENS = [ 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724 ] output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual(output, []) @require_jinja def test_tokenization_for_chat(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") test_chats = [ [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}], [ {"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Nice to meet you."}, ], [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}], ] tokenized_chats = [multilingual_tokenizer.apply_chat_template(test_chat) for test_chat in test_chats] expected_tokens = [ [3223, 366, 257, 4961, 5081, 18870, 13, 50257, 15947, 0, 50257], [3223, 366, 257, 4961, 5081, 18870, 13, 50257, 15947, 0, 50257, 37717, 220, 1353, 1677, 291, 13, 50257], [37717, 220, 1353, 1677, 291, 13, 50257, 15947, 0, 50257], ] for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens): self.assertListEqual(tokenized_chat, expected_tokens)
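For reference, a standalone sketch of the chunk-merging helper exercised above; the import path and the expected merge mirror the first case in test_find_longest_common_subsequence.

from transformers.models.whisper.tokenization_whisper import _find_longest_common_sequence

# The next chunk overlaps the previous one on [2, 3]; the merge keeps the overlap once.
previous_sequence = [1, 2, 3]
next_sequence = [2, 3, 4, 5]
merged = _find_longest_common_sequence([previous_sequence, next_sequence])
print(merged)  # [1, 2, 3, 4, 5]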
Testing suite for the PyTorch X-CLIP model (Apache License 2.0, 2022 The HuggingFace Inc. team). It defines testers and test classes for the vision tower (where the sequence length is the number of patches plus one for the CLS token, one extra message token is accounted for when checking attention shapes, and the effective batch of batch size times time must stay divisible by the number of frames), for the text tower, and for the combined XCLIPModel. Common tests are overridden where X-CLIP differs: attention outputs, the multi-GPU data-parallel forward pass, initialization (logit_scale must match log(1/0.07) and prompts_generator.alpha must match config.prompt_alpha), TorchScript tracing on (input_ids, pixel_values), loading XCLIPVisionConfig and XCLIPTextConfig back from a saved XCLIPConfig, and from_pretrained loading. A slow integration test scores an eight-frame spaghetti video against three text prompts with microsoft/xclip-base-patch32 and compares logits_per_video against the expected values [14.0181, 20.2771, 14.4776].
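A hedged end-to-end sketch of the video/text scoring that the slow integration test checks; it assumes the microsoft/xclip-base-patch32 checkpoint named there and substitutes random uint8 frames for the real eight-frame spaghetti clip, so the printed scores are not meaningful.

import numpy as np
import torch
from transformers import XCLIPModel, XCLIPProcessor

checkpoint = "microsoft/xclip-base-patch32"
model = XCLIPModel.from_pretrained(checkpoint)
processor = XCLIPProcessor.from_pretrained(checkpoint)

# Stand-in for the 8-frame video the integration test downloads from the Hub.
video = list(np.random.randint(0, 256, size=(8, 224, 224, 3), dtype=np.uint8))

inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"],
    videos=video,
    return_tensors="pt",
    padding=True,
)

with torch.no_grad():
    outputs = model(**inputs)

# One row per video, one column per text prompt.
print(outputs.logits_per_video.softmax(dim=-1))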
import inspect import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import XCLIPModel, XCLIPTextModel, XCLIPVisionModel from transformers.models.x_clip.modeling_x_clip import XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import XCLIPProcessor class XCLIPVisionModelTester: def __init__( self, parent, batch_size=8, image_size=30, patch_size=2, num_channels=3, num_frames=8, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, mit_hidden_size=64, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_frames = num_frames self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.mit_hidden_size = mit_hidden_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size] ) config = self.get_config() return config, pixel_values def get_config(self): return XCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, mit_hidden_size=self.mit_hidden_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = XCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (XCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = 
False test_head_masking = False def setUp(self): self.model_tester = XCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue print("Model class:", model_class) config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) 
self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(outputs.attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) out_len = len(outputs) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], ) @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() model = nn.DataParallel(model) with torch.no_grad(): test = self._prepare_for_class(inputs_dict, model_class) for k, v in test.items(): if isinstance(v, torch.Tensor): print(k, v.shape) else: print(k, v) _ = model(**self._prepare_for_class(inputs_dict, model_class)) class XCLIPTextModelTester: def __init__( self, parent, batch_size=8, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return XCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = XCLIPTextModel(config=config) model.to(torch_device) model.eval() with 
torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (XCLIPTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = XCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=XCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class XCLIPModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, projection_dim=64, mit_hidden_size=64, is_training=True, ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.projection_dim = projection_dim self.mit_hidden_size = mit_hidden_size self.text_model_tester = XCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = XCLIPVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, _ = self.vision_model_tester.prepare_config_and_inputs() pixel_values = floats_tensor( [ self.vision_model_tester.batch_size, self.vision_model_tester.num_frames, self.vision_model_tester.num_channels, self.vision_model_tester.image_size, self.vision_model_tester.image_size, ] ) config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return XCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=self.projection_dim, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = 
XCLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_video.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size), ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False maxdiff = None def setUp(self): self.model_tester = XCLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="XCLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="XCLIPModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif name == "prompts_generator.alpha": self.assertAlmostEqual(param.data.mean().item(), model.config.prompt_alpha) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load 
module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = XCLIPVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_vision @require_torch class XCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "microsoft/xclip-base-patch32" model = XCLIPModel.from_pretrained(model_name).to(torch_device) processor = XCLIPProcessor.from_pretrained(model_name) video = prepare_video() inputs = processor( text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) self.assertEqual( outputs.logits_per_video.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_video, expected_logits, atol=1e-3))
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Notes carried over from the original inline comments: the PT/Flax equivalence tests are overwritten from the
# common tests because attention_mask in combination with the causal mask behaves slightly differently here. They
# prepare the inputs, load the corresponding PyTorch class (skipping the "Flax" prefix at the beginning of the
# class name), disable use_cache on the PyTorch model (Flax models don't use the use_cache option and the cache is
# not returned by default), and make sure the weights are tied in PyTorch before comparing outputs.
import tempfile import unittest import transformers from transformers import XGLMConfig, XGLMTokenizer, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_sentencepiece, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp import numpy as np from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.xglm.modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel if is_torch_available(): import torch @require_flax class FlaxXGLMModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 3, self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, input_ids, attention_mask = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), 
dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_sentencepiece @require_flax class FlaxXGLMModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxXGLMModel, FlaxXGLMForCausalLM) if is_flax_available() else () all_generative_model_classes = (FlaxXGLMForCausalLM,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxXGLMModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = XGLMTokenizer.from_pretrained("XGLM", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.config.num_beams = 1 model.config.do_sample = False jit_generate = jax.jit(model.generate) output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of questions, but I'm not sure if I'm", "Hey, I'm a newbie to the forum and I'", ] self.assertListEqual(output_string, expected_string) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = 
pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/xglm-564M") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
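# Illustrative sketch (not part of the original file): the cache-equivalence pattern exercised by
# check_use_cache_forward above, distilled into a standalone helper. It assumes any of the Flax XGLM classes above
# (they expose init_cache) and a NumPy input_ids batch; the helper name is made up.
def _example_flax_cache_equivalence(model, input_ids):
    import jax.numpy as jnp
    import numpy as np

    batch_size, seq_length = input_ids.shape
    # the cache only needs to be large enough to hold every position we write
    past_key_values = model.init_cache(batch_size, seq_length)
    attention_mask = jnp.ones((batch_size, seq_length), dtype="i4")
    # feed all tokens but the last one, keeping the cache that comes back
    position_ids = jnp.broadcast_to(jnp.arange(seq_length - 1)[None, :], (batch_size, seq_length - 1))
    prefix_out = model(
        input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids
    )
    # then feed only the last token, reusing the cache
    last_position = jnp.array(batch_size * [[seq_length - 1]], dtype="i4")
    last_out = model(
        input_ids[:, -1:],
        attention_mask=attention_mask,
        past_key_values=prefix_out.past_key_values,
        position_ids=last_position,
    )
    full_out = model(input_ids)
    # the last position must match a full forward pass (the tests above use a tolerance of 1e-3)
    return np.max(np.abs(last_out[0][:, -1] - full_out[0][:, -1]))


# Usage note (also illustrative): the @is_pt_flax_cross_test and @slow tests above are skipped by default; at the
# time of writing, transformers.testing_utils reads the RUN_PT_FLAX_CROSS_TESTS and RUN_SLOW environment variables
# to enable them, e.g.:
#     RUN_PT_FLAX_CROSS_TESTS=1 RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_flax_xglm.py -k "equivalence or batch_generation"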
# coding=utf-8
# Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Notes carried over from the original inline comments: the sampling test forces generation to happen on CPU to
# avoid GPU-related quirks and to assure the same output regardless of the available devices, and the batch
# generation test uses sentences of different lengths to exercise left-padding. The original comments also record
# an expected continuation beginning "The dog is a very friendly dog. He is very affectionate and loves to play
# with other".
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class TFXGLMModelTester: config_cls = XGLMConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs(self): input_ids = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config(self): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) test_onnx = False test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = TFXGLMModelTester(self) self.config_tester = 
ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() @slow def test_model_from_pretrained(self): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") def test_resize_token_embeddings(self): super().test_resize_token_embeddings() @require_tf class TFXGLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xglm(self, verify_outputs=True): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32) expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tf.random.set_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="tf") input_ids = tokenized.input_ids with tf.device(":/CPU:0"): output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0]) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR) @slow def test_batch_generation(self): model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" sentences = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
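# Illustrative sketch (not part of the original file): test_xglm_sample above relies on TensorFlow's stateless
# sampling seed (seed=[7, 0]) to make the sampled continuation reproducible. Under that assumption, sampling twice
# with the same seed pair should give identical sequences; the helper name is made up.
def _example_tf_deterministic_sampling(model, input_ids):
    first = model.generate(input_ids, do_sample=True, seed=[7, 0])
    second = model.generate(input_ids, do_sample=True, seed=[7, 0])
    tf.debugging.assert_equal(first, second)  # identical stateless seeds give identical samples
    return first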
# coding=utf-8
# Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Notes carried over from the original inline comments: the past-key-values checks run a first forward pass, create
# a hypothetical next token and extend it to next_input_ids, append it to the next input_ids (and to the
# token_type_ids or attn_mask where applicable), get the two different outputs, select a random slice, and test
# that the outputs are equal for that slice. The generation tests clean up as much GPU memory occupied by PyTorch
# as possible, use different length sentences to test batching, and record an expected continuation beginning
# "The dog is a very friendly dog. He is very affectionate and loves to play with other". The fp16 batching test
# checks the first logits, which could contain NaNs if batched fp16 inference fails, with a TODO to remove the
# torch 1.13.1+cu116 vs torch 2.0+cu117 workaround once we move to torch 2.0.
import datetime import gc import math import unittest from transformers import XGLMConfig, is_torch_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMTokenizer class XGLMModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(gradient_checkpointing=gradient_checkpointing) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_xglm_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() 
result = model(input_ids, head_mask=head_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.num_hidden_layers) def create_and_check_xglm_model_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, use_cache=True) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 output, past = model(input_ids, attention_mask=attn_mask).to_tuple() next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.zeros((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=1) next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[ "last_hidden_state" ] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def 
create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, *args, gradient_checkpointing=False ): model = XGLMForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_xglm_weight_initialization(self, config, *args): model = XGLMModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XGLMModel, XGLMForCausalLM) if is_torch_available() else () all_generative_model_classes = (XGLMForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": XGLMModel, "text-generation": XGLMForCausalLM} if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = XGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_xglm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model(*config_and_inputs) def test_xglm_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_past(*config_and_inputs) def test_xglm_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_attention_mask_past(*config_and_inputs) def test_xglm_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_past_large_inputs(*config_and_inputs) def test_xglm_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_xglm_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_xglm_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in 
XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() @unittest.skip("This test is currently broken because of safetensors.") def test_tf_from_pt_safetensors(self): pass @require_torch class XGLMModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def _test_lm_generate_xglm_helper( self, gradient_checkpointing=False, verify_outputs=True, ): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") if gradient_checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device) expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @slow def test_batch_generation(self): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.to(torch_device) tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" sentences = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=12 ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_lm_generate_xglm(self): self._test_lm_generate_xglm_helper() @slow def test_lm_generate_xglm_with_gradient_checkpointing(self): self._test_lm_generate_xglm_helper(gradient_checkpointing=True) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt") input_ids = tokenized.input_ids output_ids = model.generate(input_ids, do_sample=True, num_beams=1) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STRS = [ "Today is a nice day and the sun is shining. A nice day with warm rainy", "Today is a nice day and the water is still cold. We just stopped off for some fresh", ] self.assertIn(output_str, EXPECTED_OUTPUT_STRS) @slow def test_xglm_sample_max_time(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt") input_ids = tokenized.input_ids.to(torch_device) MAX_TIME = 0.15 start = datetime.datetime.now() model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.25 * MAX_TIME)) @require_torch_accelerator @require_torch_fp16 def test_batched_nan_fp16(self): model_name = "facebook/xglm-564M" tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") model = XGLMForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_cache=True).to(torch_device) model = model.eval() batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt") input_ids = batch["input_ids"].to(torch_device) attention_mask = batch["attention_mask"].to(torch_device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask) 
self.assertFalse( torch.isnan(outputs.logits[0]).any().item() )
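# Illustrative sketch (not part of the original file): the past-key-values consistency pattern used by
# create_and_check_xglm_model_past above, distilled into a standalone helper. Any decoder-only model that returns
# past_key_values could be passed in place of XGLMModel; the helper name is made up.
def _example_torch_kv_cache_consistency(model, input_ids, vocab_size):
    model.eval()
    with torch.no_grad():
        # first forward pass, keeping the cache
        last_hidden, past = model(input_ids, use_cache=True).to_tuple()
        # create a hypothetical next token and append it to next_input_ids
        next_tokens = torch.randint(0, vocab_size, (input_ids.shape[0], 1), device=input_ids.device)
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        # outputs with and without the cache must agree on the newly added position
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
    return torch.allclose(output_from_past[:, 0], output_from_no_past[:, -1], atol=1e-3)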
# coding=utf-8
# Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# Testing suite for the XGLM tokenizers. We have a SentencePiece fixture for testing; among the common tokenizer
# tests, the suite covers _convert_token_to_id and _convert_id_to_token.
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XGLMTokenizer rust_tokenizer_class = XGLMTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(len(vocab_keys), 1_008) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_008) def test_full_tokenizer(self): tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return XGLMTokenizer.from_pretrained("facebook/xglm-564M") def test_picklable_without_disk(self): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SAMPLE_VOCAB, f.name) tokenizer = XGLMTokenizer(f.name, keep_accents=True) pickled_tokenizer = pickle.dumps(tokenizer) pickle.loads(pickled_tokenizer) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [2, 31227, 4447, 35] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): expected_encoding = { 'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
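# Illustrative sketch, not part of the original test suite: a minimal manual round-trip
# through XGLMTokenizer mirroring test_full_tokenizer above. It assumes the local
# SentencePiece fixture (SAMPLE_VOCAB) is available; outputs are printed, not asserted.
if __name__ == "__main__":
    sketch_tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
    # Tokenize, map tokens to ids, then map the ids back to tokens.
    sketch_tokens = sketch_tokenizer.tokenize("This is a test")
    sketch_ids = sketch_tokenizer.convert_tokens_to_ids(sketch_tokens)
    print(sketch_tokens)  # expected per the test above: ["▁This", "▁is", "▁a", "▁t", "est"]
    print(sketch_ids)
    print(sketch_tokenizer.convert_ids_to_tokens(sketch_ids))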
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed
# to in writing, software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
# TensorFlow XLM model tests.
# TODO: fix the failing QAPipelineTests. They fail for a few models when the slow tokenizer
# is used (the slow tokenizers were never used for pipeline tests before the pipeline
# testing rework); check and possibly fix the QAPipelineTests with the slow tokenizer.
# Note: input_lengths below are a small variation of seq_length.
# TODO (PVP): check whether language generation is also applicable to other models.
# TODO (PVP): this and other input_ids tried for generation give pretty bad results (the
# greedy continuation of "the president" just repeats "the president"); not sure why, the
# model might just not be made for auto-regressive inference.
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMModel, TFXLMWithLMHeadModel, XLMConfig, ) class TFXLMModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = True self.scope = None self.bos_token_id = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMModel(config=config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, 
input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMWithLMHeadModel(config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} outputs = model(inputs) result = outputs self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForSequenceClassification(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = TFXLMForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = TFXLMForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class TFXLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple, TFXLMForTokenClassification, TFXLMForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFXLMWithLMHeadModel,) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFXLMModel, "fill-mask": TFXLMWithLMHeadModel, "question-answering": TFXLMForQuestionAnsweringSimple, "text-classification": TFXLMForSequenceClassification, 
"text-generation": TFXLMWithLMHeadModel, "token-classification": TFXLMForTokenClassification, "zero-shot": TFXLMForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def setUp(self): self.model_tester = TFXLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFXLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = TFXLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048") input_ids = tf.convert_to_tensor([[14, 447]], dtype=tf.int32) expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed
# to in writing, software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
# PyTorch XLM model tests.
# TODO: fix the failing QAPipelineTests. They fail for a few models when the slow tokenizer
# is used (the slow tokenizers were never used for pipeline tests before the pipeline
# testing rework); check and possibly fix the QAPipelineTests with the slow tokenizer.
# Note: input_lengths below are a small variation of seq_length.
# Note: XLM has two question-answering models; the correct labels need to be set manually
# for one of them (see `_prepare_for_class`).
# Note: in the generate checks, each step adds a PAD dummy token, so the expected attention
# and hidden-state sizes grow by one per step.
# TODO (PVP): check whether language generation is also applicable to other models.
# TODO (PVP): this and other input_ids tried for generation give pretty bad results (the
# greedy continuation of "the president" just repeats "the president"); not sure why, the
# model might just not be made for auto-regressive inference.
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class XLMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, 
gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() outputs = model(input_ids) outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) result = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForSequenceClassification(config) model.to(torch_device) model.eval() result 
= model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = XLMForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = XLMForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) all_generative_model_classes = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = XLMModelTester(self) self.config_tester 
= ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_xlm_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs) def test_xlm_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx + 1 src_len = min_length + idx + 1 expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx + 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) pass @slow def test_model_from_pretrained(self): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class XLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048") model.to(torch_device) input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device) expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
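# Illustrative sketch, not part of the original test suite: XLM ships two question-answering
# heads, and the tests above treat them differently (XLMForQuestionAnswering needs its labels
# set manually in _prepare_for_class). The snippet below only contrasts their unlabelled
# output shapes using the tester's tiny random config; passing parent=None is an assumption
# of this sketch and works because prepare_config_and_inputs never touches parent.
if __name__ == "__main__" and is_torch_available():
    sketch_tester = XLMModelTester(None)
    sketch_config, sketch_input_ids = sketch_tester.prepare_config_and_inputs()[:2]
    sketch_simple = XLMForQuestionAnsweringSimple(sketch_config).to(torch_device).eval()
    sketch_full = XLMForQuestionAnswering(sketch_config).to(torch_device).eval()
    with torch.no_grad():
        simple_out = sketch_simple(sketch_input_ids)
        full_out = sketch_full(sketch_input_ids)
    print(simple_out.start_logits.shape)       # (batch_size, seq_length)
    print(full_out.start_top_log_probs.shape)  # (batch_size, config.start_n_top)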
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed
# to in writing, software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
# XLM tokenization tests. The toy BPE vocabulary and merges are adapted from
# Sennrich et al. (2015) and https://github.com/rsennrich/subword-nmt.
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = XLMTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_2 + [1]
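# Illustrative sketch, not part of the original test suite: building the same toy BPE
# tokenizer by hand to see how the merges drive tokenization ("lower" becomes
# ["low", "er</w>"], as asserted in test_full_tokenizer above). The temporary directory is
# an illustrative choice; like the tests, this assumes XLMTokenizer's sacremoses dependency
# is installed.
if __name__ == "__main__":
    import tempfile

    toy_vocab = [
        "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
        "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
    ]
    toy_vocab_tokens = dict(zip(toy_vocab, range(len(toy_vocab))))
    toy_merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
    with tempfile.TemporaryDirectory() as tmp_dir:
        toy_vocab_file = os.path.join(tmp_dir, VOCAB_FILES_NAMES["vocab_file"])
        toy_merges_file = os.path.join(tmp_dir, VOCAB_FILES_NAMES["merges_file"])
        with open(toy_vocab_file, "w") as fp:
            fp.write(json.dumps(toy_vocab_tokens))
        with open(toy_merges_file, "w") as fp:
            fp.write("\n".join(toy_merges))
        toy_tokenizer = XLMTokenizer(toy_vocab_file, toy_merges_file)
        print(toy_tokenizer.tokenize("lower"))  # expected per the test above: ["low", "er</w>"]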
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed
# to in writing, software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
# XLM-ProphetNet integration tests: encoder and decoder outputs of pretrained checkpoints
# are compared against expected values on a small slice of the logits.
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device if is_torch_available(): import torch from transformers import XLMProphetNetForConditionalGeneration, XLMProphetNetTokenizer @require_torch class XLMProphetNetModelIntegrationTest(unittest.TestCase): @slow def test_pretrained_checkpoint_hidden_states(self): model = XLMProphetNetForConditionalGeneration.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") model.to(torch_device) encoder_ids = torch.tensor([[17, 96208, 103471, 2]]).to(torch_device) decoder_prev_ids = torch.tensor( [[2, 250, 9953, 34, 69489, 1620, 32, 118424, 624, 210, 105, 2913, 1032, 351]] ).to(torch_device) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids ) output_predited_logis = output[0] expected_shape = torch.Size((1, 14, 250012)) self.assertEqual(output_predited_logis.shape, expected_shape) expected_slice = torch.tensor( [[[-6.3986, -8.2391, 12.5189], [-6.3289, -8.0864, 12.6211], [-6.2418, -8.0445, 12.7968]]] ).to(torch_device) self.assertTrue(torch.allclose(output_predited_logis[:, :3, :3], expected_slice, atol=1e-4)) encoder_outputs = model.prophetnet.encoder(encoder_ids)[0] expected_encoder_outputs_slice = torch.tensor( [[[-1.4260, -0.7628, 0.8453], [-1.4719, -0.1391, 0.7807], [-1.7678, 0.0114, 0.4646]]] ).to(torch_device) expected_shape_encoder = torch.Size((1, 4, 1024)) self.assertEqual(encoder_outputs.shape, expected_shape_encoder) self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)) decoder_outputs = model.prophetnet.decoder( decoder_prev_ids, encoder_hidden_states=encoder_outputs, ) predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 14, -1) predicting_streams_logits = model.lm_head(predicting_streams) next_first_stream_logits = predicting_streams_logits[:, 0] self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_ntg_hidden_states(self): model = XLMProphetNetForConditionalGeneration.from_pretrained( "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg" ) model.to(torch_device) encoder_ids = torch.tensor([[17, 96208, 103471, 2]]).to(torch_device) decoder_prev_ids = torch.tensor( [[2, 250, 9953, 34, 69489, 1620, 32, 118424, 624, 210, 105, 2913, 1032, 351]] ).to(torch_device) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids ) output_predited_logis = output[0] expected_shape = torch.Size((1, 14, 250012)) self.assertEqual(output_predited_logis.shape, expected_shape) expected_slice = torch.tensor( [[[-9.2253, -9.7173, -6.3529], [-7.6701, -9.0145, -1.9382], [-8.0195, -7.0004, -0.1523]]] ).to(torch_device) self.assertTrue(torch.allclose(output_predited_logis[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_xprophetnet_ntg_inference(self): model = XLMProphetNetForConditionalGeneration.from_pretrained( "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg" ) model.to(torch_device) model.config.max_length = 512 tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased-xglue-ntg") EN_SENTENCE = ( "Microsoft Corporation intends to officially end free support for the Windows 7 operating system after" " January 14, 2020, according to the official portal of the organization. 
From that day, users of this" " system will not be able to receive security updates, which could make their computers vulnerable to" " cyber attacks." ) RU_SENTENCE = ( "орпорация Microsoft намерена официально прекратить бесплатную поддержку операционной системы Windows 7" " после 14 января 2020 года, сообщается на официальном портале организации . С указанного дня пользователи" " этой системы не смогут получать обновления безопасности, из-за чего их компьютеры могут стать уязвимыми" " к кибератакам." ) ZH_SENTENCE = "根据该组织的官方门户网站,微软公司打算在2020年1月14日之后正式终止对Windows 7操作系统的免费支持。从那时起,该系统的用户将无法接收安全更新,这可能会使他们的计算机容易受到网络攻击。" input_ids = tokenizer( [EN_SENTENCE, RU_SENTENCE, ZH_SENTENCE], padding=True, max_length=255, return_tensors="pt" ).input_ids input_ids = input_ids.to(torch_device) summary_ids = model.generate( input_ids, num_beams=10, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) generated_titles = [tokenizer.decode(g, skip_special_tokens=True) for g in summary_ids] EXPECTED_TITLE_EN = "Microsoft to end Windows 7 free support after January 14, 2020" EXPECTED_TITLE_RU = "Microsoft намерена прекратить бесплатную поддержку Windows 7 после 14 января 2020 года" EXPECTED_TITLE_ZH = "微软打算终止对Windows 7操作系统的免费支持" self.assertListEqual( [EXPECTED_TITLE_EN, EXPECTED_TITLE_RU, EXPECTED_TITLE_ZH], generated_titles, ) summary_ids_beam1 = model.generate( input_ids, num_beams=1, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) generated_titles_beam1_tok = [ tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True) for g in summary_ids_beam1 ] EXPECTED_TITLE_EN_BEAM1_TOK = "▁Microsoft ▁to ▁end ▁free ▁support ▁for ▁Windows ▁7".split(" ") EXPECTED_TITLE_RU_BEAM1_TOK = "▁Microsoft ▁намерен а ▁прекрати ть ▁бес плат ную ▁поддержку ▁Windows ▁7 ▁после ▁14 ▁января ▁2020 ▁года".split( " " ) EXPECTED_TITLE_ZH_BEAM1_TOK = "微软 公司 打算 终止 对 Windows ▁7 操作 系统的 免费 支持".split(" ") self.assertListEqual( [EXPECTED_TITLE_EN_BEAM1_TOK, EXPECTED_TITLE_RU_BEAM1_TOK, EXPECTED_TITLE_ZH_BEAM1_TOK], generated_titles_beam1_tok, )
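# Illustrative sketch, not part of the original test suite: the headline-generation pattern
# exercised by test_xprophetnet_ntg_inference above, reduced to a single English input. It
# assumes PyTorch is installed and the "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"
# checkpoint can be downloaded; the generated title is printed rather than asserted.
if __name__ == "__main__" and is_torch_available():
    sketch_tokenizer = XLMProphetNetTokenizer.from_pretrained(
        "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"
    )
    sketch_model = XLMProphetNetForConditionalGeneration.from_pretrained(
        "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"
    )
    sketch_text = (
        "Microsoft Corporation intends to officially end free support for the Windows 7 "
        "operating system after January 14, 2020, according to the official portal of the "
        "organization."
    )
    sketch_inputs = sketch_tokenizer([sketch_text], padding=True, max_length=255, return_tensors="pt")
    sketch_summary_ids = sketch_model.generate(
        sketch_inputs.input_ids, num_beams=10, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
    )
    print(sketch_tokenizer.decode(sketch_summary_ids[0], skip_special_tokens=True))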