"""Convert an unrolled (per-layer) Flax speech encoder-decoder checkpoint into
the scanned (stacked-layer) parameter layout, sanity-check it, and save it
together with its feature extractor and tokenizer."""

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import AutoFeatureExtractor, AutoTokenizer

from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

# Tiny random checkpoints; handy for exercising the conversion end to end.
encoder_id = "hf-internal-testing/tiny-random-wav2vec2"
decoder_id = "hf-internal-testing/tiny-random-bart"

# Load the same pretrained weights twice: once with unrolled (per-layer)
# modules, once with scanned modules.
unrolled_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id,
    decoder_id,
    encoder_add_adapter=True,
    encoder_from_pt=True,
    decoder_from_pt=True,
    encoder_use_scan=False,
    decoder_use_scan=False,
)
model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id,
    decoder_id,
    encoder_add_adapter=True,
    encoder_from_pt=True,
    decoder_from_pt=True,
    encoder_use_scan=True,
    decoder_use_scan=True,
)
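
# With use_scan=True, each transformer block's parameters are expected as one
# stacked tensor per weight (leading axis = layer index) iterated with
# flax.linen.scan; with use_scan=False every layer keeps its own parameter
# subtree keyed '0', '1', ... The unrolled_to_scanned helper below restacks
# the unrolled weights into that stacked layout.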

# Zero out dropout and layerdrop for deterministic behaviour, set SpecAugment's
# mask_time_prob, and configure generation defaults and special tokens.
model.config.encoder.feat_proj_dropout = 0.0
model.config.encoder.final_dropout = 0.0
model.config.encoder.mask_time_prob = 0.1
model.config.decoder_start_token_id = model.config.decoder.bos_token_id
model.config.pad_token_id = model.config.decoder.pad_token_id
model.config.eos_token_id = model.config.decoder.eos_token_id
model.config.max_length = 40
model.config.num_beams = 1
model.config.encoder.layerdrop = 0.0
model.config.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"


def unrolled_to_scanned(model):
    """Restack per-layer (unrolled) weights into the stacked tensors the scanned modules expect."""
    params = model.params

    # Stack each encoder layer parameter along a new leading (layer) axis.
    new_enc_params = {}
    for k in flatten_dict(params['encoder']['encoder']['layers']['0']):
        new_enc_params[k] = jnp.stack(
            [flatten_dict(params['encoder']['encoder']['layers'][str(i)])[k] for i in range(model.config.encoder.num_hidden_layers)]
        )
    new_enc_params = unflatten_dict({('encoder', 'layers', 'FlaxWav2Vec2EncoderLayers'): unflatten_dict(new_enc_params)})

    # Same for the decoder layers.
    new_dec_params = {}
    for k in flatten_dict(params['decoder']['model']['decoder']['layers']['0']):
        new_dec_params[k] = jnp.stack(
            [flatten_dict(params['decoder']['model']['decoder']['layers'][str(i)])[k] for i in range(model.config.decoder.decoder_layers)]
        )
    new_dec_params = unflatten_dict({('model', 'decoder', 'layers', 'FlaxBartDecoderLayers'): unflatten_dict(new_dec_params)})

    new_params = flatten_dict({'encoder': new_enc_params, 'decoder': new_dec_params})

    # Copy over everything that is not a stacked transformer layer (embeddings,
    # feature encoder, layer norms, ...). The encoder adapter has 'layers' in its
    # path but is not scanned, so it is copied as-is too.
    flat_params = flatten_dict(params)
    for k, v in flat_params.items():
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = v

    return unflatten_dict(new_params)
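

# Quick shape check (illustrative sketch): every stacked encoder parameter
# should gain a leading axis equal to the number of encoder layers.
_flat = flatten_dict(unrolled_to_scanned(unrolled_model))
_n_layers = unrolled_model.config.encoder.num_hidden_layers
assert all(v.shape[0] == _n_layers for k, v in _flat.items() if 'FlaxWav2Vec2EncoderLayers' in k)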

# Swap the scanned model's parameters for the restacked unrolled weights.
model.params = unrolled_to_scanned(unrolled_model)
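
# Optional equivalence check (a sketch; assumes the custom model class mirrors
# the upstream FlaxSpeechEncoderDecoderModel call signature): with identical
# weights, the scanned model should reproduce the unrolled model's logits.
_inputs = jnp.ones((1, 2000))
_decoder_input_ids = jnp.ones((1, 4), dtype="i4")
_unrolled_logits = unrolled_model(_inputs, decoder_input_ids=_decoder_input_ids).logits
_scanned_logits = model(_inputs, decoder_input_ids=_decoder_input_ids).logits
assert jnp.allclose(_unrolled_logits, _scanned_logits, atol=1e-4)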

# Smoke test: greedy generation on a dummy waveform.
out = model.generate(jnp.ones((1, 2000)))

# Save the converted model together with its feature extractor and tokenizer.
model.save_pretrained("./")

feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")

tokenizer = AutoTokenizer.from_pretrained(decoder_id)
tokenizer.save_pretrained("./")
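
# Example (illustrative): decode the generated ids. The tiny random checkpoint
# will not produce meaningful text; this just exercises the full round trip.
print(tokenizer.batch_decode(out.sequences, skip_special_tokens=True))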