{ "_name_or_path": "THUDM/glm-4-voice-tokenizer", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": [ "WhisperVQEncoder" ], "attention_dropout": 0.0, "begin_suppress_tokens": [ 220, 50257 ], "bos_token_id": 50257, "classifier_proj_size": 256, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 32, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 20, "encoder_causal_attention": false, "encoder_causal_convolution": true, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "is_encoder_decoder": true, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50256, "pooling_kernel_size": 4, "pooling_position": 16, "pooling_type": "avg", "quantize_causal_block_size": 200, "quantize_causal_encoder": false, "quantize_commit_coefficient": 0.25, "quantize_ema_decay": 0.99, "quantize_encoder_only": true, "quantize_loss_scale": 10.0, "quantize_position": 16, "quantize_restart_interval": 100, "quantize_vocab_size": 16384, "scale_embedding": false, "skip_language_detection": true, "torch_dtype": "float32", "transformers_version": "4.44.1", "use_cache": true, "use_weighted_layer_sum": false, "vocab_size": 51866 }